279ef521fde81048d7b27380c060769273495882
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
51 #include <rte_dev.h>
52 #include <rte_eth_ctrl.h>
53
54 #include "i40e_logs.h"
55 #include "i40e/i40e_prototype.h"
56 #include "i40e/i40e_adminq_cmd.h"
57 #include "i40e/i40e_type.h"
58 #include "i40e_ethdev.h"
59 #include "i40e_rxtx.h"
60 #include "i40e_pf.h"
61
/* Maximum number of MAC addresses */
63 #define I40E_NUM_MACADDR_MAX       64
64 #define I40E_CLEAR_PXE_WAIT_MS     200
65
/* Maximum number of capability elements */
67 #define I40E_MAX_CAP_ELE_NUM       128
68
/* Wait count and interval */
70 #define I40E_CHK_Q_ENA_COUNT       1000
71 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
72
/* Maximum number of VSIs */
74 #define I40E_MAX_NUM_VSIS          (384UL)
75
76 /* Bit shift and mask */
77 #define I40E_16_BIT_SHIFT 16
78 #define I40E_16_BIT_MASK  0xFFFF
79 #define I40E_32_BIT_SHIFT 32
80 #define I40E_32_BIT_MASK  0xFFFFFFFF
81 #define I40E_48_BIT_SHIFT 48
82 #define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
83
/* Default queue interrupt throttling time in microseconds */
85 #define I40E_ITR_INDEX_DEFAULT          0
86 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
87 #define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
88
89 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
90
91 /* Mask of PF interrupt causes */
92 #define I40E_PFINT_ICR0_ENA_MASK ( \
93                 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
94                 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
95                 I40E_PFINT_ICR0_ENA_GRST_MASK | \
96                 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
97                 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
98                 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
99                 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
100                 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
101                 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
102                 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
103
104 static int eth_i40e_dev_init(\
105                         __attribute__((unused)) struct eth_driver *eth_drv,
106                         struct rte_eth_dev *eth_dev);
107 static int i40e_dev_configure(struct rte_eth_dev *dev);
108 static int i40e_dev_start(struct rte_eth_dev *dev);
109 static void i40e_dev_stop(struct rte_eth_dev *dev);
110 static void i40e_dev_close(struct rte_eth_dev *dev);
111 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
112 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
113 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
114 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
115 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
116 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
117 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
118                                struct rte_eth_stats *stats);
119 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
120 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
121                                             uint16_t queue_id,
122                                             uint8_t stat_idx,
123                                             uint8_t is_rx);
124 static void i40e_dev_info_get(struct rte_eth_dev *dev,
125                               struct rte_eth_dev_info *dev_info);
126 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
127                                 uint16_t vlan_id,
128                                 int on);
129 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
130 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
131 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
132                                       uint16_t queue,
133                                       int on);
134 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
135 static int i40e_dev_led_on(struct rte_eth_dev *dev);
136 static int i40e_dev_led_off(struct rte_eth_dev *dev);
137 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
138                               struct rte_eth_fc_conf *fc_conf);
139 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
140                                        struct rte_eth_pfc_conf *pfc_conf);
141 static void i40e_macaddr_add(struct rte_eth_dev *dev,
142                           struct ether_addr *mac_addr,
143                           uint32_t index,
144                           uint32_t pool);
145 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
146 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
147                                     struct rte_eth_rss_reta *reta_conf);
148 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
149                                    struct rte_eth_rss_reta *reta_conf);
150
151 static int i40e_get_cap(struct i40e_hw *hw);
152 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
153 static int i40e_pf_setup(struct i40e_pf *pf);
154 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
155 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
156 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
157                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
158 static void i40e_stat_update_48(struct i40e_hw *hw,
159                                uint32_t hireg,
160                                uint32_t loreg,
161                                bool offset_loaded,
162                                uint64_t *offset,
163                                uint64_t *stat);
164 static void i40e_pf_config_irq0(struct i40e_hw *hw);
165 static void i40e_dev_interrupt_handler(
166                 __rte_unused struct rte_intr_handle *handle, void *param);
167 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
168                                 uint32_t base, uint32_t num);
169 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
170 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
171                         uint32_t base);
172 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
173                         uint16_t num);
174 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
175 static int i40e_veb_release(struct i40e_veb *veb);
176 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
177                                                 struct i40e_vsi *vsi);
178 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
179 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
180 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
181                                              struct i40e_macvlan_filter *mv_f,
182                                              int num,
183                                              struct ether_addr *addr);
184 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
185                                              struct i40e_macvlan_filter *mv_f,
186                                              int num,
187                                              uint16_t vlan);
188 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
189 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
190                                     struct rte_eth_rss_conf *rss_conf);
191 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
192                                       struct rte_eth_rss_conf *rss_conf);
193 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
194                                 struct rte_eth_udp_tunnel *udp_tunnel);
195 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
196                                 struct rte_eth_udp_tunnel *udp_tunnel);
197 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
198                                 enum rte_filter_type filter_type,
199                                 enum rte_filter_op filter_op,
200                                 void *arg);
201
202 /* Default hash key buffer for RSS */
203 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
204
/*
 * PCI device-ID table used to match i40e adapters at probe time.
 * The entries are expanded from rte_pci_dev_ids.h via the
 * RTE_PCI_DEV_ID_DECL_I40E macro; the zeroed entry is the terminator.
 */
static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
210
/*
 * Table of ethdev callbacks exported by this PMD; installed on the device
 * in eth_i40e_dev_init().  Any rte_eth_dev_ops member not set here remains
 * NULL, meaning the corresponding ethdev API is not supported.
 */
static struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.stats_reset                  = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get                = i40e_dev_info_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_add               = i40e_dev_udp_tunnel_add,
	.udp_tunnel_del               = i40e_dev_udp_tunnel_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
};
256
/*
 * PCI ethdev driver descriptor registered in rte_i40e_pmd_init().
 * eth_i40e_dev_init() runs once per matched device; dev_private_size
 * is the per-port private area (struct i40e_adapter) the ethdev layer
 * allocates for us.
 */
static struct eth_driver rte_i40e_pmd = {
	{
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		/* needs BAR mapping; supports link-status-change interrupts */
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};
266
/*
 * Round n down to the nearest power of two.
 *
 * @param n  value to align; non-positive values yield 0.
 * @return   largest power of two <= n, or 0 for n <= 0.
 *
 * Fix: the previous implementation fed n to __builtin_clz(), which takes
 * an unsigned int.  A negative n converted to a huge unsigned value with
 * clz == 0, making the result `1 << 31` — signed-overflow UB.  The loop
 * below is well-defined for the whole int range and needs no CHAR_BIT.
 */
static inline int
i40e_align_floor(int n)
{
	int pow2;

	if (n <= 0)
		return 0;
	/* halve n while doubling pow2: invariant pow2 * n_orig-ish floor */
	for (pow2 = 1; n > 1; n >>= 1)
		pow2 <<= 1;
	return pow2;
}
274
275 static inline int
276 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
277                                      struct rte_eth_link *link)
278 {
279         struct rte_eth_link *dst = link;
280         struct rte_eth_link *src = &(dev->data->dev_link);
281
282         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
283                                         *(uint64_t *)src) == 0)
284                 return -1;
285
286         return 0;
287 }
288
289 static inline int
290 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
291                                       struct rte_eth_link *link)
292 {
293         struct rte_eth_link *dst = &(dev->data->dev_link);
294         struct rte_eth_link *src = link;
295
296         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
297                                         *(uint64_t *)src) == 0)
298                 return -1;
299
300         return 0;
301 }
302
303 /*
304  * Driver initialization routine.
305  * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI I40E devices.
307  */
/*
 * EAL driver-init hook: registers the i40e PMD with the ethdev layer.
 * Both parameters come from the generic rte_driver init signature and
 * are unused here.  Always returns 0.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}
317
/* EAL driver descriptor: PMD_PDEV marks a physical-device PMD; the init
 * callback runs once at EAL initialization to register the ethdev driver. */
static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
324
325 static int
326 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
327                   struct rte_eth_dev *dev)
328 {
329         struct rte_pci_device *pci_dev;
330         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
331         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
332         struct i40e_vsi *vsi;
333         int ret;
334         uint32_t len;
335         uint8_t aq_fail = 0;
336
337         PMD_INIT_FUNC_TRACE();
338
339         dev->dev_ops = &i40e_eth_dev_ops;
340         dev->rx_pkt_burst = i40e_recv_pkts;
341         dev->tx_pkt_burst = i40e_xmit_pkts;
342
343         /* for secondary processes, we don't initialise any further as primary
344          * has already done this work. Only check we don't need a different
345          * RX function */
346         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
347                 if (dev->data->scattered_rx)
348                         dev->rx_pkt_burst = i40e_recv_scattered_pkts;
349                 return 0;
350         }
351         pci_dev = dev->pci_dev;
352         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
353         pf->adapter->eth_dev = dev;
354         pf->dev_data = dev->data;
355
356         hw->back = I40E_PF_TO_ADAPTER(pf);
357         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
358         if (!hw->hw_addr) {
359                 PMD_INIT_LOG(ERR, "Hardware is not available, "
360                              "as address is NULL");
361                 return -ENODEV;
362         }
363
364         hw->vendor_id = pci_dev->id.vendor_id;
365         hw->device_id = pci_dev->id.device_id;
366         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
367         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
368         hw->bus.device = pci_dev->addr.devid;
369         hw->bus.func = pci_dev->addr.function;
370
371         /* Make sure all is clean before doing PF reset */
372         i40e_clear_hw(hw);
373
374         /* Reset here to make sure all is clean for each PF */
375         ret = i40e_pf_reset(hw);
376         if (ret) {
377                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
378                 return ret;
379         }
380
381         /* Initialize the shared code (base driver) */
382         ret = i40e_init_shared_code(hw);
383         if (ret) {
384                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
385                 return ret;
386         }
387
388         /* Initialize the parameters for adminq */
389         i40e_init_adminq_parameter(hw);
390         ret = i40e_init_adminq(hw);
391         if (ret != I40E_SUCCESS) {
392                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
393                 return -EIO;
394         }
395         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
396                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
397                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
398                      ((hw->nvm.version >> 12) & 0xf),
399                      ((hw->nvm.version >> 4) & 0xff),
400                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
401
402         /* Disable LLDP */
403         ret = i40e_aq_stop_lldp(hw, true, NULL);
404         if (ret != I40E_SUCCESS) /* Its failure can be ignored */
405                 PMD_INIT_LOG(INFO, "Failed to stop lldp");
406
407         /* Clear PXE mode */
408         i40e_clear_pxe_mode(hw);
409
410         /* Get hw capabilities */
411         ret = i40e_get_cap(hw);
412         if (ret != I40E_SUCCESS) {
413                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
414                 goto err_get_capabilities;
415         }
416
417         /* Initialize parameters for PF */
418         ret = i40e_pf_parameter_init(dev);
419         if (ret != 0) {
420                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
421                 goto err_parameter_init;
422         }
423
424         /* Initialize the queue management */
425         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
426         if (ret < 0) {
427                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
428                 goto err_qp_pool_init;
429         }
430         ret = i40e_res_pool_init(&pf->msix_pool, 1,
431                                 hw->func_caps.num_msix_vectors - 1);
432         if (ret < 0) {
433                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
434                 goto err_msix_pool_init;
435         }
436
437         /* Initialize lan hmc */
438         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
439                                 hw->func_caps.num_rx_qp, 0, 0);
440         if (ret != I40E_SUCCESS) {
441                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
442                 goto err_init_lan_hmc;
443         }
444
445         /* Configure lan hmc */
446         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
447         if (ret != I40E_SUCCESS) {
448                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
449                 goto err_configure_lan_hmc;
450         }
451
452         /* Get and check the mac address */
453         i40e_get_mac_addr(hw, hw->mac.addr);
454         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
455                 PMD_INIT_LOG(ERR, "mac address is not valid");
456                 ret = -EIO;
457                 goto err_get_mac_addr;
458         }
459         /* Copy the permanent MAC address */
460         ether_addr_copy((struct ether_addr *) hw->mac.addr,
461                         (struct ether_addr *) hw->mac.perm_addr);
462
463         /* Disable flow control */
464         hw->fc.requested_mode = I40E_FC_NONE;
465         i40e_set_fc(hw, &aq_fail, TRUE);
466
467         /* PF setup, which includes VSI setup */
468         ret = i40e_pf_setup(pf);
469         if (ret) {
470                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
471                 goto err_setup_pf_switch;
472         }
473
474         vsi = pf->main_vsi;
475
476         /* Disable double vlan by default */
477         i40e_vsi_config_double_vlan(vsi, FALSE);
478
479         if (!vsi->max_macaddrs)
480                 len = ETHER_ADDR_LEN;
481         else
482                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
483
484         /* Should be after VSI initialized */
485         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
486         if (!dev->data->mac_addrs) {
487                 PMD_INIT_LOG(ERR, "Failed to allocated memory "
488                                         "for storing mac address");
489                 goto err_mac_alloc;
490         }
491         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
492                                         &dev->data->mac_addrs[0]);
493
494         /* initialize pf host driver to setup SRIOV resource if applicable */
495         i40e_pf_host_init(dev);
496
497         /* register callback func to eal lib */
498         rte_intr_callback_register(&(pci_dev->intr_handle),
499                 i40e_dev_interrupt_handler, (void *)dev);
500
501         /* configure and enable device interrupt */
502         i40e_pf_config_irq0(hw);
503         i40e_pf_enable_irq0(hw);
504
505         /* enable uio intr after callback register */
506         rte_intr_enable(&(pci_dev->intr_handle));
507
508         return 0;
509
510 err_mac_alloc:
511         i40e_vsi_release(pf->main_vsi);
512 err_setup_pf_switch:
513 err_get_mac_addr:
514 err_configure_lan_hmc:
515         (void)i40e_shutdown_lan_hmc(hw);
516 err_init_lan_hmc:
517         i40e_res_pool_destroy(&pf->msix_pool);
518 err_msix_pool_init:
519         i40e_res_pool_destroy(&pf->qp_pool);
520 err_qp_pool_init:
521 err_parameter_init:
522 err_get_capabilities:
523         (void)i40e_shutdown_adminq(hw);
524
525         return ret;
526 }
527
528 static int
529 i40e_dev_configure(struct rte_eth_dev *dev)
530 {
531         int ret;
532         enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
533
534         /* VMDQ setup.
535          *  Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
536          *  RSS setting have different requirements.
537          *  General PMD driver call sequence are NIC init, configure,
538          *  rx/tx_queue_setup and dev_start. In rx/tx_queue_setup() function, it
539          *  will try to lookup the VSI that specific queue belongs to if VMDQ
540          *  applicable. So, VMDQ setting has to be done before
541          *  rx/tx_queue_setup(). This function is good  to place vmdq_setup.
542          *  For RSS setting, it will try to calculate actual configured RX queue
543          *  number, which will be available after rx_queue_setup(). dev_start()
544          *  function is good to place RSS setup.
545          */
546         if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
547                 ret = i40e_vmdq_setup(dev);
548                 if (ret)
549                         return ret;
550         }
551
552         return i40e_dev_init_vlan(dev);
553 }
554
/*
 * Detach all of a VSI's queues from their MSI-X vector.
 * Clears each queue's TX/RX interrupt cause-control register, then clears
 * the vector's interrupt link-list head and ITR register.
 */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	/* Disable interrupt causes per queue; the write barrier makes each
	 * pair of register writes visible before moving to the next queue */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		/* PF-owned VSI: clear the PF link-list head and ITR.
		 * NOTE(review): msix_vect - 1 assumes msix_intr >= 1. */
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), 0);
	} else {
		uint32_t reg;
		/* VF VSI: compute the absolute VF vector index from the
		 * per-VF vector count (minus irq0) and the VF id */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}
	I40E_WRITE_FLUSH(hw);
}
581
582 static inline uint16_t
583 i40e_calc_itr_interval(int16_t interval)
584 {
585         if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
586                 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
587
588         /* Convert to hardware count, as writing each 1 represents 2 us */
589         return (interval/2);
590 }
591
/*
 * Bind all of a VSI's RX queues to its MSI-X vector and chain them into
 * the hardware interrupt link list: the list head lives in *_LNKLSTN and
 * each queue's RQCTL register points at the next queue.
 */
void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	int i;

	/* TX queues are not chained: clear their cause-control registers */
	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		/* vector index | ITR index | next-queue pointer | enable */
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			I40E_QINT_RQCTL_ITR_INDX_MASK |
			((vsi->base_queue + i + 1) <<
			I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		/* last queue terminates the list (NEXTQ index all-ones) */
		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element.
	 * NOTE(review): msix_vect - 1 assumes msix_intr >= 1. */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
						(vsi->base_queue <<
				I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						msix_vect - 1), interval);

#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
		/* Disable auto-mask on enabling of all non-zero interrupts */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf needs to minus irq0 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
651
/*
 * Enable the MSI-X vector serving this VSI's queues: sets INTENA, clears
 * any pending-bit-array entry and programs ITR index 0 with the
 * configured throttling interval.
 * NOTE(review): msix_intr - 1 assumes msix_intr >= 1.
 */
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(\
			RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
					I40E_PFINT_DYN_CTLN_INTENA_MASK |
					I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			(interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}
665
/*
 * Mask the VSI's MSI-X vector by zeroing its dynamic control register
 * (INTENA cleared), stopping queue interrupts until re-enabled.
 */
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}
673
674 static inline uint8_t
675 i40e_parse_link_speed(uint16_t eth_link_speed)
676 {
677         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
678
679         switch (eth_link_speed) {
680         case ETH_LINK_SPEED_40G:
681                 link_speed = I40E_LINK_SPEED_40GB;
682                 break;
683         case ETH_LINK_SPEED_20G:
684                 link_speed = I40E_LINK_SPEED_20GB;
685                 break;
686         case ETH_LINK_SPEED_10G:
687                 link_speed = I40E_LINK_SPEED_10GB;
688                 break;
689         case ETH_LINK_SPEED_1000:
690                 link_speed = I40E_LINK_SPEED_1GB;
691                 break;
692         case ETH_LINK_SPEED_100:
693                 link_speed = I40E_LINK_SPEED_100MB;
694                 break;
695         }
696
697         return link_speed;
698 }
699
/*
 * Program the PHY through the admin queue.
 *
 * Reads the current PHY capabilities, preserves the pause/low-power bits
 * from the response, and writes a new configuration where the advertised
 * speed set is either the full `advt` set (when autonegotiation is
 * enabled in `abilities`) or the single `force_speed`.
 *
 * Returns I40E_SUCCESS, or -ENOTSUP when either admin-queue call fails.
 */
static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	/* bits that must mirror what the PHY currently reports */
	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	/* speed set advertised when autonegotiation is enabled */
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;
	int ret = -ENOTSUP;

	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
		return ret;

	memset(&phy_conf, 0, sizeof(phy_conf));

	/* bits 0-2 use the values from get_phy_abilities_resp */
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update abilities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = force_speed;

	phy_conf.abilities = abilities;

	/* use get_phy_abilities_resp value for the rest */
	phy_conf.phy_type = phy_ab.phy_type;
	phy_conf.eee_capability = phy_ab.eee_capability;
	phy_conf.eeer = phy_ab.eeer_val;
	phy_conf.low_power_ctrl = phy_ab.d3_lpan;

	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
		    phy_ab.abilities, phy_ab.link_speed);
	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
		    phy_conf.abilities, phy_conf.link_speed);

	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
	if (status)
		return ret;

	return I40E_SUCCESS;
}
751
752 static int
753 i40e_apply_link_speed(struct rte_eth_dev *dev)
754 {
755         uint8_t speed;
756         uint8_t abilities = 0;
757         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
758         struct rte_eth_conf *conf = &dev->data->dev_conf;
759
760         speed = i40e_parse_link_speed(conf->link_speed);
761         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
762         if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
763                 abilities |= I40E_AQ_PHY_AN_ENABLED;
764         else
765                 abilities |= I40E_AQ_PHY_LINK_ENABLED;
766
767         return i40e_phy_conf_link(hw, abilities, speed);
768 }
769
/*
 * Start the device: initialize RX/TX queues, bind queue interrupts,
 * enable the queues, allow broadcast reception and apply the configured
 * link speed. On any fatal error all queues are switched off and
 * released again.
 *
 * @return I40E_SUCCESS on success, -EINVAL for an unsupported duplex
 *         setting, or the failing step's error code.
 */
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int ret, i;

	/* Only full duplex (or autoneg) is supported by this device */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* Initialize VSI */
	ret = i40e_dev_rxtx_init(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(main_vsi);
	i40e_vsi_enable_queues_intr(main_vsi);

	/* Map VMDQ VSI queues with MSIX interrupt */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
	}

	/* Enable all queues which have been configured */
	ret = i40e_dev_switch_queues(pf, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets; failure here is non-fatal */
	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "fail to set vsi broadcast");

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
						true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
	}

	/* Apply link configure */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	return I40E_SUCCESS;

err_up:
	/* Roll back: stop every queue and release queue resources */
	i40e_dev_switch_queues(pf, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
837
838 static void
839 i40e_dev_stop(struct rte_eth_dev *dev)
840 {
841         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
842         struct i40e_vsi *main_vsi = pf->main_vsi;
843         int i;
844
845         /* Disable all queues */
846         i40e_dev_switch_queues(pf, FALSE);
847
848         /* un-map queues with interrupt registers */
849         i40e_vsi_disable_queues_intr(main_vsi);
850         i40e_vsi_queues_unbind_intr(main_vsi);
851
852         for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
853                 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
854                 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
855         }
856
857         /* Clear all queues and release memory */
858         i40e_dev_clear_queues(dev);
859
860         /* Set link down */
861         i40e_dev_set_link_down(dev);
862 }
863
/*
 * Close the device: stop traffic, disable interrupts, shut down the
 * HMC and admin queue, release all VSIs/VEBs and resource pools, and
 * finally trigger a PF software reset to clear any leftover HW state.
 */
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
898
899 static void
900 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
901 {
902         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
903         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
904         struct i40e_vsi *vsi = pf->main_vsi;
905         int status;
906
907         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
908                                                         true, NULL);
909         if (status != I40E_SUCCESS)
910                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
911
912         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
913                                                         TRUE, NULL);
914         if (status != I40E_SUCCESS)
915                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
916
917 }
918
919 static void
920 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
921 {
922         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
923         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
924         struct i40e_vsi *vsi = pf->main_vsi;
925         int status;
926
927         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
928                                                         false, NULL);
929         if (status != I40E_SUCCESS)
930                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
931
932         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
933                                                         false, NULL);
934         if (status != I40E_SUCCESS)
935                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
936 }
937
938 static void
939 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
940 {
941         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
942         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
943         struct i40e_vsi *vsi = pf->main_vsi;
944         int ret;
945
946         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
947         if (ret != I40E_SUCCESS)
948                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
949 }
950
951 static void
952 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
953 {
954         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
955         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
956         struct i40e_vsi *vsi = pf->main_vsi;
957         int ret;
958
959         if (dev->data->promiscuous == 1)
960                 return; /* must remain in all_multicast mode */
961
962         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
963                                 vsi->seid, FALSE, NULL);
964         if (ret != I40E_SUCCESS)
965                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
966 }
967
968 /*
969  * Set device link up.
970  */
971 static int
972 i40e_dev_set_link_up(struct rte_eth_dev *dev)
973 {
974         /* re-apply link speed setting */
975         return i40e_apply_link_speed(dev);
976 }
977
978 /*
979  * Set device link down.
980  */
981 static int
982 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
983 {
984         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
985         uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
986         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
987
988         return i40e_phy_conf_link(hw, abilities, speed);
989 }
990
/*
 * Query link status from the admin queue and publish it to the ethdev
 * link structure.
 *
 * @return 0 when the link status changed since the previous call,
 *         -1 when it is unchanged (or the query failed and the
 *         fallback status equals the old one) — the old ethdev
 *         link_update convention.
 */
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		/* Fall back to a defaulted (down) link record */
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info");
		goto out;
	}

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;

	/* Speed/duplex are only meaningful when the link is up */
	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Map the HW speed code to the ethdev speed value */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
1051
/*
 * Get all the statistics of a VSI.
 *
 * Reads the per-VSI GLV_* counters (indexed by the VSI's stat counter
 * index) into vsi->eth_stats. On the first call after a reset
 * (offset_loaded == false) the raw values are captured as the offset
 * baseline; subsequent calls report deltas relative to that baseline.
 */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	/* RX byte/packet counters (48-bit, split high/low registers) */
	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	/* TX byte/packet counters */
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded,  &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	/* Baseline captured; future reads are deltas */
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
1115
/* Get all statistics of a port */
/*
 * Read every per-port GLPRT_* counter into pf->stats (with
 * pf->stats_offset as the delta baseline, same scheme as
 * i40e_update_vsi_stats), refresh the main VSI stats, and fill the
 * generic rte_eth_stats structure from the accumulated values.
 */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint32_t i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
			    pf->offset_loaded, &os->eth.tx_discards,
			    &ns->eth.tx_discards);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	/* Flow-control XON/XOFF frame counters, global and per priority */
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	/* RX packet-size histogram counters */
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	/* TX packet-size histogram counters */
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	/* Baseline captured; future reads are deltas */
	pf->offset_loaded = true;

	if (pf->main_vsi)
		i40e_update_vsi_stats(pf->main_vsi);

	/* Aggregate into the generic ethdev stats structure */
	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
						ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
						ns->eth.tx_broadcast;
	stats->ibytes   = ns->eth.rx_bytes;
	stats->obytes   = ns->eth.tx_bytes;
	stats->oerrors  = ns->eth.tx_errors;
	stats->imcasts  = ns->eth.rx_multicast;

	/* Rx Errors */
	stats->ibadcrc  = ns->crc_errors;
	stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
	stats->imissed  = ns->eth.rx_discards;
	stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;

	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %lu",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors:               %lu", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes:            %lu",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes:              %lu", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults:         %lu",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %lu",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "rx_length_errors:         %lu",
		    ns->rx_length_errors);
	PMD_DRV_LOG(DEBUG, "link_xon_rx:              %lu", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %lu", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %lu",
				i, ns->priority_xon_rx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %lu",
				i, ns->priority_xoff_rx[i]);
	}
	PMD_DRV_LOG(DEBUG, "link_xon_tx:              %lu", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %lu", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %lu",
				i, ns->priority_xon_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %lu",
				i, ns->priority_xoff_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %lu",
				i, ns->priority_xon_2_xoff[i]);
	}
	PMD_DRV_LOG(DEBUG, "rx_size_64:               %lu", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127:              %lu", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255:              %lu", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511:              %lu", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023:             %lu", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522:             %lu", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big:              %lu", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize:             %lu", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments:             %lu", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize:              %lu", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber:                %lu", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64:               %lu", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127:              %lu", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255:              %lu", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511:              %lu", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023:             %lu", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522:             %lu", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big:              %lu", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
			ns->mac_short_packet_dropped);
	PMD_DRV_LOG(DEBUG, "checksum_error:           %lu",
		    ns->checksum_error);
	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
1390
1391 /* Reset the statistics */
1392 static void
1393 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1394 {
1395         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1396
1397         /* It results in reloading the start point of each counter */
1398         pf->offset_loaded = false;
1399 }
1400
/*
 * Per-queue statistics register mapping is not implemented for i40e;
 * all parameters are ignored and ENOSYS is reported to the caller.
 */
static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
1411
1412 static void
1413 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1414 {
1415         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1416         struct i40e_vsi *vsi = pf->main_vsi;
1417
1418         dev_info->max_rx_queues = vsi->nb_qps;
1419         dev_info->max_tx_queues = vsi->nb_qps;
1420         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1421         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1422         dev_info->max_mac_addrs = vsi->max_macaddrs;
1423         dev_info->max_vfs = dev->pci_dev->max_vfs;
1424         dev_info->rx_offload_capa =
1425                 DEV_RX_OFFLOAD_VLAN_STRIP |
1426                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1427                 DEV_RX_OFFLOAD_UDP_CKSUM |
1428                 DEV_RX_OFFLOAD_TCP_CKSUM;
1429         dev_info->tx_offload_capa =
1430                 DEV_TX_OFFLOAD_VLAN_INSERT |
1431                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1432                 DEV_TX_OFFLOAD_UDP_CKSUM |
1433                 DEV_TX_OFFLOAD_TCP_CKSUM |
1434                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1435         dev_info->reta_size = pf->hash_lut_size;
1436
1437         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1438                 .rx_thresh = {
1439                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1440                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1441                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1442                 },
1443                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1444                 .rx_drop_en = 0,
1445         };
1446
1447         dev_info->default_txconf = (struct rte_eth_txconf) {
1448                 .tx_thresh = {
1449                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1450                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1451                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1452                 },
1453                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1454                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1455                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1456                                 ETH_TXQ_FLAGS_NOOFFLOADS,
1457         };
1458
1459         if (pf->flags | I40E_FLAG_VMDQ) {
1460                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1461                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1462                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1463                                                 pf->max_nb_vmdq_vsi;
1464                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1465                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1466                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
1467         }
1468 }
1469
1470 static int
1471 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1472 {
1473         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1474         struct i40e_vsi *vsi = pf->main_vsi;
1475         PMD_INIT_FUNC_TRACE();
1476
1477         if (on)
1478                 return i40e_vsi_add_vlan(vsi, vlan_id);
1479         else
1480                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1481 }
1482
/* Setting a custom VLAN TPID is not supported; this is a no-op stub */
static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
{
	PMD_INIT_FUNC_TRACE();
}
1489
1490 static void
1491 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1492 {
1493         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1494         struct i40e_vsi *vsi = pf->main_vsi;
1495
1496         if (mask & ETH_VLAN_STRIP_MASK) {
1497                 /* Enable or disable VLAN stripping */
1498                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1499                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1500                 else
1501                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1502         }
1503
1504         if (mask & ETH_VLAN_EXTEND_MASK) {
1505                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1506                         i40e_vsi_config_double_vlan(vsi, TRUE);
1507                 else
1508                         i40e_vsi_config_double_vlan(vsi, FALSE);
1509         }
1510 }
1511
/* Per-queue VLAN stripping is not supported; this is a no-op stub */
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
1519
1520 static int
1521 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1522 {
1523         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1524         struct i40e_vsi *vsi = pf->main_vsi;
1525         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1526         struct i40e_vsi_vlan_pvid_info info;
1527
1528         memset(&info, 0, sizeof(info));
1529         info.on = on;
1530         if (info.on)
1531                 info.config.pvid = pvid;
1532         else {
1533                 info.config.reject.tagged =
1534                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1535                 info.config.reject.untagged =
1536                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1537         }
1538
1539         return i40e_vsi_vlan_pvid_set(vsi, &info);
1540 }
1541
1542 static int
1543 i40e_dev_led_on(struct rte_eth_dev *dev)
1544 {
1545         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1546         uint32_t mode = i40e_led_get(hw);
1547
1548         if (mode == 0)
1549                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
1550
1551         return 0;
1552 }
1553
1554 static int
1555 i40e_dev_led_off(struct rte_eth_dev *dev)
1556 {
1557         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1558         uint32_t mode = i40e_led_get(hw);
1559
1560         if (mode != 0)
1561                 i40e_led_set(hw, 0, false);
1562
1563         return 0;
1564 }
1565
/* Link flow control configuration is not implemented; returns -ENOSYS */
static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
1574
/* Priority flow control (PFC) is not implemented; returns -ENOSYS */
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
1583
1584 /* Add a MAC address, and update filters */
1585 static void
1586 i40e_macaddr_add(struct rte_eth_dev *dev,
1587                  struct ether_addr *mac_addr,
1588                  __rte_unused uint32_t index,
1589                  uint32_t pool)
1590 {
1591         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1592         struct i40e_mac_filter_info mac_filter;
1593         struct i40e_vsi *vsi;
1594         int ret;
1595
1596         /* If VMDQ not enabled or configured, return */
1597         if (pool != 0 && (!(pf->flags | I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
1598                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
1599                         pf->flags | I40E_FLAG_VMDQ ? "configured" : "enabled",
1600                         pool);
1601                 return;
1602         }
1603
1604         if (pool > pf->nb_cfg_vmdq_vsi) {
1605                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1606                                 pool, pf->nb_cfg_vmdq_vsi);
1607                 return;
1608         }
1609
1610         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1611         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1612
1613         if (pool == 0)
1614                 vsi = pf->main_vsi;
1615         else
1616                 vsi = pf->vmdq[pool - 1].vsi;
1617
1618         ret = i40e_vsi_add_mac(vsi, &mac_filter);
1619         if (ret != I40E_SUCCESS) {
1620                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1621                 return;
1622         }
1623 }
1624
1625 /* Remove a MAC address, and update filters */
1626 static void
1627 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1628 {
1629         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1630         struct i40e_vsi *vsi;
1631         struct rte_eth_dev_data *data = dev->data;
1632         struct ether_addr *macaddr;
1633         int ret;
1634         uint32_t i;
1635         uint64_t pool_sel;
1636
1637         macaddr = &(data->mac_addrs[index]);
1638
1639         pool_sel = dev->data->mac_pool_sel[index];
1640
1641         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1642                 if (pool_sel & (1ULL << i)) {
1643                         if (i == 0)
1644                                 vsi = pf->main_vsi;
1645                         else {
1646                                 /* No VMDQ pool enabled or configured */
1647                                 if (!(pf->flags | I40E_FLAG_VMDQ) ||
1648                                         (i > pf->nb_cfg_vmdq_vsi)) {
1649                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1650                                                         "/configured");
1651                                         return;
1652                                 }
1653                                 vsi = pf->vmdq[i - 1].vsi;
1654                         }
1655                         ret = i40e_vsi_delete_mac(vsi, macaddr);
1656
1657                         if (ret) {
1658                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1659                                 return;
1660                         }
1661                 }
1662         }
1663 }
1664
/*
 * Set perfect match or hash match of MAC and VLAN for a VF.
 *
 * @pf:     PF private data; must be non-NULL.
 * @filter: filter description (MAC address, filter type, destination
 *          VF id in dst_id); must be non-NULL with a non-zero MAC.
 * @add:    true to install the filter, false to remove it.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -1 on AQ failure.
 */
static int
i40e_vf_mac_filter_set(struct i40e_pf *pf,
		 struct rte_eth_mac_filter *filter,
		 bool add)
{
	struct i40e_hw *hw;
	struct i40e_mac_filter_info mac_filter;
	/* NOTE(review): old_mac is saved below but never restored or read
	 * afterwards — appears to be a dead store; confirm before removing */
	struct ether_addr old_mac;
	struct ether_addr *new_mac;
	struct i40e_pf_vf *vf = NULL;
	uint16_t vf_id;
	int ret;

	if (pf == NULL) {
		PMD_DRV_LOG(ERR, "Invalid PF argument.");
		return -EINVAL;
	}
	hw = I40E_PF_TO_HW(pf);

	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
		return -EINVAL;
	}

	new_mac = &filter->mac_addr;

	if (is_zero_ether_addr(new_mac)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
		return -EINVAL;
	}

	vf_id = filter->dst_id;

	/* dst_id must index an existing VF */
	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}
	vf = &pf->vfs[vf_id];

	/* The permanent device address cannot be re-added as a VF filter */
	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
		return -EINVAL;
	}

	if (add) {
		/* Shadow the new MAC in hw->mac.addr before programming
		 * the filter on the VF's VSI */
		(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
		(void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
				ETHER_ADDR_LEN);
		(void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
				 ETHER_ADDR_LEN);

		mac_filter.filter_type = filter->filter_type;
		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
			return -1;
		}
		ether_addr_copy(new_mac, &pf->dev_addr);
	} else {
		/* Fall back to the permanent (factory) MAC on removal */
		(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
				ETHER_ADDR_LEN);
		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
			return -1;
		}

		/* Clear device address as it has been removed */
		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
	}

	return 0;
}
1740
1741 /* MAC filter handle */
1742 static int
1743 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1744                 void *arg)
1745 {
1746         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1747         struct rte_eth_mac_filter *filter;
1748         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1749         int ret = I40E_NOT_SUPPORTED;
1750
1751         filter = (struct rte_eth_mac_filter *)(arg);
1752
1753         switch (filter_op) {
1754         case RTE_ETH_FILTER_NOP:
1755                 ret = I40E_SUCCESS;
1756                 break;
1757         case RTE_ETH_FILTER_ADD:
1758                 i40e_pf_disable_irq0(hw);
1759                 if (filter->is_vf)
1760                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
1761                 i40e_pf_enable_irq0(hw);
1762                 break;
1763         case RTE_ETH_FILTER_DELETE:
1764                 i40e_pf_disable_irq0(hw);
1765                 if (filter->is_vf)
1766                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
1767                 i40e_pf_enable_irq0(hw);
1768                 break;
1769         default:
1770                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1771                 ret = I40E_ERR_PARAM;
1772                 break;
1773         }
1774
1775         return ret;
1776 }
1777
/*
 * Update the RSS redirection table (RETA).
 *
 * The hardware LUT lives in the I40E_PFQF_HLUT register array; each
 * 32-bit register packs four 8-bit entries. reta_conf->mask_lo/mask_hi
 * select which entries to modify; unselected bytes keep their current
 * hardware value via read-modify-write.
 */
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut, l;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	/* Process four entries (one HLUT register) per iteration */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		/* Entries below 'max' are selected by mask_lo, the rest
		 * by mask_hi; extract the 4-bit group mask */
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		/* Skip the register read when all four bytes are replaced */
		if (mask == 0xF)
			l = 0;
		else
			l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));

		/* Merge new entries with preserved bytes of the old value */
		for (j = 0, lut = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				lut |= reta_conf->reta[i + j] << (8 * j);
			else
				lut |= l & (0xFF << (8 * j));
		}
		I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	return 0;
}
1812
/*
 * Query the RSS redirection table (RETA).
 *
 * Reads the I40E_PFQF_HLUT register array (four 8-bit entries per
 * 32-bit register) and copies the entries selected by
 * reta_conf->mask_lo/mask_hi into reta_conf->reta.
 */
static int
i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	/* Process four entries (one HLUT register) per iteration */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		/* mask_lo covers the lower half of the table, mask_hi
		 * the upper half */
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
		for (j = 0; j < 4; j++) {
			/* Only copy out the bytes the caller asked for */
			if (mask & (0x1 << j))
				reta_conf->reta[i + j] =
					(uint8_t)((lut >> (8 * j)) & 0xFF);
		}
	}

	return 0;
}
1841
1842 /**
1843  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1844  * @hw:   pointer to the HW structure
1845  * @mem:  pointer to mem struct to fill out
1846  * @size: size of memory requested
1847  * @alignment: what to align the allocation to
1848  **/
1849 enum i40e_status_code
1850 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1851                         struct i40e_dma_mem *mem,
1852                         u64 size,
1853                         u32 alignment)
1854 {
1855         static uint64_t id = 0;
1856         const struct rte_memzone *mz = NULL;
1857         char z_name[RTE_MEMZONE_NAMESIZE];
1858
1859         if (!mem)
1860                 return I40E_ERR_PARAM;
1861
1862         id++;
1863         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1864 #ifdef RTE_LIBRTE_XEN_DOM0
1865         mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1866                                                         RTE_PGSIZE_2M);
1867 #else
1868         mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1869 #endif
1870         if (!mz)
1871                 return I40E_ERR_NO_MEMORY;
1872
1873         mem->id = id;
1874         mem->size = size;
1875         mem->va = mz->addr;
1876 #ifdef RTE_LIBRTE_XEN_DOM0
1877         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1878 #else
1879         mem->pa = mz->phys_addr;
1880 #endif
1881
1882         return I40E_SUCCESS;
1883 }
1884
1885 /**
1886  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1887  * @hw:   pointer to the HW structure
1888  * @mem:  ptr to mem struct to free
1889  **/
1890 enum i40e_status_code
1891 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1892                     struct i40e_dma_mem *mem)
1893 {
1894         if (!mem || !mem->va)
1895                 return I40E_ERR_PARAM;
1896
1897         mem->va = NULL;
1898         mem->pa = (u64)0;
1899
1900         return I40E_SUCCESS;
1901 }
1902
1903 /**
1904  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1905  * @hw:   pointer to the HW structure
1906  * @mem:  pointer to mem struct to fill out
1907  * @size: size of memory requested
1908  **/
1909 enum i40e_status_code
1910 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1911                          struct i40e_virt_mem *mem,
1912                          u32 size)
1913 {
1914         if (!mem)
1915                 return I40E_ERR_PARAM;
1916
1917         mem->size = size;
1918         mem->va = rte_zmalloc("i40e", size, 0);
1919
1920         if (mem->va)
1921                 return I40E_SUCCESS;
1922         else
1923                 return I40E_ERR_NO_MEMORY;
1924 }
1925
1926 /**
1927  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1928  * @hw:   pointer to the HW structure
1929  * @mem:  pointer to mem struct to free
1930  **/
1931 enum i40e_status_code
1932 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1933                      struct i40e_virt_mem *mem)
1934 {
1935         if (!mem)
1936                 return I40E_ERR_PARAM;
1937
1938         rte_free(mem->va);
1939         mem->va = NULL;
1940
1941         return I40E_SUCCESS;
1942 }
1943
/* OS-shim for the shared code: initialize a spinlock */
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}
1949
/* OS-shim for the shared code: take a spinlock (busy-waits until held) */
void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}
1955
/* OS-shim for the shared code: release a spinlock */
void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}
1961
/* OS-shim for the shared code: rte_spinlock_t needs no teardown (no-op) */
void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
	return;
}
1967
1968 /**
1969  * Get the hardware capabilities, which will be parsed
1970  * and saved into struct i40e_hw.
1971  */
1972 static int
1973 i40e_get_cap(struct i40e_hw *hw)
1974 {
1975         struct i40e_aqc_list_capabilities_element_resp *buf;
1976         uint16_t len, size = 0;
1977         int ret;
1978
1979         /* Calculate a huge enough buff for saving response data temporarily */
1980         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1981                                                 I40E_MAX_CAP_ELE_NUM;
1982         buf = rte_zmalloc("i40e", len, 0);
1983         if (!buf) {
1984                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1985                 return I40E_ERR_NO_MEMORY;
1986         }
1987
1988         /* Get, parse the capabilities and save it to hw */
1989         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1990                         i40e_aqc_opc_list_func_capabilities, NULL);
1991         if (ret != I40E_SUCCESS)
1992                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1993
1994         /* Free the temporary buffer after being used */
1995         rte_free(buf);
1996
1997         return ret;
1998 }
1999
/*
 * Budget VSIs and queue pairs among LAN, SR-IOV VFs, VMDQ and flow
 * director based on the capabilities discovered from firmware.
 *
 * Returns I40E_SUCCESS, or -EINVAL when the requested configuration
 * exceeds the hardware's VSI, queue or MSIX budgets.
 */
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t sum_queues = 0, sum_vsis, left_queues;

	/* First check if FW support SRIOV */
	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
		return -EINVAL;
	}

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
	PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
	/* Allocate queues for pf */
	if (hw->func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS;
		/* LAN queue count is bounded by both TX queues and the
		 * RSS table width (2^rss_table_entry_width entries) */
		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
		/* Round down so the queue count works with RSS */
		pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
	} else
		pf->lan_nb_qps = 1;
	sum_queues = pf->lan_nb_qps;
	/* Default VSI is not counted in */
	sum_vsis = 0;
	PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);

	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
			PMD_INIT_LOG(ERR, "Config VF number %u, "
				     "max supported %u.",
				     dev->pci_dev->max_vfs,
				     hw->func_caps.num_vfs);
			return -EINVAL;
		}
		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
				     "max support %u queues.",
				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
			return -EINVAL;
		}
		pf->vf_num = dev->pci_dev->max_vfs;
		/* Every VF consumes one VSI and vf_nb_qps queue pairs */
		sum_queues += pf->vf_nb_qps * pf->vf_num;
		sum_vsis   += pf->vf_num;
		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
			     pf->vf_num, pf->vf_nb_qps);
	} else
		pf->vf_num = 0;

	if (hw->func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ;
		pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
		pf->max_nb_vmdq_vsi = 1;
		/*
		 * If VMDQ available, assume a single VSI can be created.  Will adjust
		 * later.
		 */
		sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
		sum_vsis += pf->max_nb_vmdq_vsi;
	} else {
		pf->vmdq_nb_qps = 0;
		pf->max_nb_vmdq_vsi = 0;
	}
	pf->nb_cfg_vmdq_vsi = 0;

	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
		/**
		 * Each flow director consumes one VSI and one queue,
		 * but can't calculate out predictably here.
		 */
	}

	/* Reject configurations exceeding the hardware VSI/queue budgets */
	if (sum_vsis > pf->max_num_vsi ||
		sum_queues > hw->func_caps.num_rx_qp) {
		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
			     pf->max_num_vsi, sum_vsis);
		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
			     hw->func_caps.num_rx_qp, sum_queues);
		return -EINVAL;
	}

	/* Adjust VMDQ setting to support as many VMs as possible */
	if (pf->flags & I40E_FLAG_VMDQ) {
		/* Hand the remaining queue budget to additional VMDQ VSIs */
		left_queues = hw->func_caps.num_rx_qp - sum_queues;

		pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
					pf->max_num_vsi - sum_vsis);

		/* Limit the max VMDQ number that rte_ether that can support  */
		pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
					ETH_64_POOLS - 1);

		PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
				pf->max_nb_vmdq_vsi);
		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
	}

	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
	 * cause */
	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
			     sum_vsis, hw->func_caps.num_msix_vectors);
		return -EINVAL;
	}
	return I40E_SUCCESS;
}
2113
2114 static int
2115 i40e_pf_get_switch_config(struct i40e_pf *pf)
2116 {
2117         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2118         struct i40e_aqc_get_switch_config_resp *switch_config;
2119         struct i40e_aqc_switch_config_element_resp *element;
2120         uint16_t start_seid = 0, num_reported;
2121         int ret;
2122
2123         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2124                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2125         if (!switch_config) {
2126                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
2127                 return -ENOMEM;
2128         }
2129
2130         /* Get the switch configurations */
2131         ret = i40e_aq_get_switch_config(hw, switch_config,
2132                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2133         if (ret != I40E_SUCCESS) {
2134                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2135                 goto fail;
2136         }
2137         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2138         if (num_reported != 1) { /* The number should be 1 */
2139                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2140                 goto fail;
2141         }
2142
2143         /* Parse the switch configuration elements */
2144         element = &(switch_config->element[0]);
2145         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2146                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2147                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2148         } else
2149                 PMD_DRV_LOG(INFO, "Unknown element type");
2150
2151 fail:
2152         rte_free(switch_config);
2153
2154         return ret;
2155 }
2156
2157 static int
2158 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2159                         uint32_t num)
2160 {
2161         struct pool_entry *entry;
2162
2163         if (pool == NULL || num == 0)
2164                 return -EINVAL;
2165
2166         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2167         if (entry == NULL) {
2168                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2169                 return -ENOMEM;
2170         }
2171
2172         /* queue heap initialize */
2173         pool->num_free = num;
2174         pool->num_alloc = 0;
2175         pool->base = base;
2176         LIST_INIT(&pool->alloc_list);
2177         LIST_INIT(&pool->free_list);
2178
2179         /* Initialize element  */
2180         entry->base = 0;
2181         entry->len = num;
2182
2183         LIST_INSERT_HEAD(&pool->free_list, entry, next);
2184         return 0;
2185 }
2186
2187 static void
2188 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2189 {
2190         struct pool_entry *entry;
2191
2192         if (pool == NULL)
2193                 return;
2194
2195         LIST_FOREACH(entry, &pool->alloc_list, next) {
2196                 LIST_REMOVE(entry, next);
2197                 rte_free(entry);
2198         }
2199
2200         LIST_FOREACH(entry, &pool->free_list, next) {
2201                 LIST_REMOVE(entry, next);
2202                 rte_free(entry);
2203         }
2204
2205         pool->num_free = 0;
2206         pool->num_alloc = 0;
2207         pool->base = 0;
2208         LIST_INIT(&pool->alloc_list);
2209         LIST_INIT(&pool->free_list);
2210 }
2211
2212 static int
2213 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2214                        uint32_t base)
2215 {
2216         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2217         uint32_t pool_offset;
2218         int insert;
2219
2220         if (pool == NULL) {
2221                 PMD_DRV_LOG(ERR, "Invalid parameter");
2222                 return -EINVAL;
2223         }
2224
2225         pool_offset = base - pool->base;
2226         /* Lookup in alloc list */
2227         LIST_FOREACH(entry, &pool->alloc_list, next) {
2228                 if (entry->base == pool_offset) {
2229                         valid_entry = entry;
2230                         LIST_REMOVE(entry, next);
2231                         break;
2232                 }
2233         }
2234
2235         /* Not find, return */
2236         if (valid_entry == NULL) {
2237                 PMD_DRV_LOG(ERR, "Failed to find entry");
2238                 return -EINVAL;
2239         }
2240
2241         /**
2242          * Found it, move it to free list  and try to merge.
2243          * In order to make merge easier, always sort it by qbase.
2244          * Find adjacent prev and last entries.
2245          */
2246         prev = next = NULL;
2247         LIST_FOREACH(entry, &pool->free_list, next) {
2248                 if (entry->base > valid_entry->base) {
2249                         next = entry;
2250                         break;
2251                 }
2252                 prev = entry;
2253         }
2254
2255         insert = 0;
2256         /* Try to merge with next one*/
2257         if (next != NULL) {
2258                 /* Merge with next one */
2259                 if (valid_entry->base + valid_entry->len == next->base) {
2260                         next->base = valid_entry->base;
2261                         next->len += valid_entry->len;
2262                         rte_free(valid_entry);
2263                         valid_entry = next;
2264                         insert = 1;
2265                 }
2266         }
2267
2268         if (prev != NULL) {
2269                 /* Merge with previous one */
2270                 if (prev->base + prev->len == valid_entry->base) {
2271                         prev->len += valid_entry->len;
2272                         /* If it merge with next one, remove next node */
2273                         if (insert == 1) {
2274                                 LIST_REMOVE(valid_entry, next);
2275                                 rte_free(valid_entry);
2276                         } else {
2277                                 rte_free(valid_entry);
2278                                 insert = 1;
2279                         }
2280                 }
2281         }
2282
2283         /* Not find any entry to merge, insert */
2284         if (insert == 0) {
2285                 if (prev != NULL)
2286                         LIST_INSERT_AFTER(prev, valid_entry, next);
2287                 else if (next != NULL)
2288                         LIST_INSERT_BEFORE(next, valid_entry, next);
2289                 else /* It's empty list, insert to head */
2290                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2291         }
2292
2293         pool->num_free += valid_entry->len;
2294         pool->num_alloc -= valid_entry->len;
2295
2296         return 0;
2297 }
2298
2299 static int
2300 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2301                        uint16_t num)
2302 {
2303         struct pool_entry *entry, *valid_entry;
2304
2305         if (pool == NULL || num == 0) {
2306                 PMD_DRV_LOG(ERR, "Invalid parameter");
2307                 return -EINVAL;
2308         }
2309
2310         if (pool->num_free < num) {
2311                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2312                             num, pool->num_free);
2313                 return -ENOMEM;
2314         }
2315
2316         valid_entry = NULL;
2317         /* Lookup  in free list and find most fit one */
2318         LIST_FOREACH(entry, &pool->free_list, next) {
2319                 if (entry->len >= num) {
2320                         /* Find best one */
2321                         if (entry->len == num) {
2322                                 valid_entry = entry;
2323                                 break;
2324                         }
2325                         if (valid_entry == NULL || valid_entry->len > entry->len)
2326                                 valid_entry = entry;
2327                 }
2328         }
2329
2330         /* Not find one to satisfy the request, return */
2331         if (valid_entry == NULL) {
2332                 PMD_DRV_LOG(ERR, "No valid entry found");
2333                 return -ENOMEM;
2334         }
2335         /**
2336          * The entry have equal queue number as requested,
2337          * remove it from alloc_list.
2338          */
2339         if (valid_entry->len == num) {
2340                 LIST_REMOVE(valid_entry, next);
2341         } else {
2342                 /**
2343                  * The entry have more numbers than requested,
2344                  * create a new entry for alloc_list and minus its
2345                  * queue base and number in free_list.
2346                  */
2347                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2348                 if (entry == NULL) {
2349                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2350                                     "resource pool");
2351                         return -ENOMEM;
2352                 }
2353                 entry->base = valid_entry->base;
2354                 entry->len = num;
2355                 valid_entry->base += num;
2356                 valid_entry->len -= num;
2357                 valid_entry = entry;
2358         }
2359
2360         /* Insert it into alloc list, not sorted */
2361         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2362
2363         pool->num_free -= valid_entry->len;
2364         pool->num_alloc += valid_entry->len;
2365
2366         return (valid_entry->base + pool->base);
2367 }
2368
2369 /**
2370  * bitmap_is_subset - Check whether src2 is subset of src1
2371  **/
2372 static inline int
2373 bitmap_is_subset(uint8_t src1, uint8_t src2)
2374 {
2375         return !((src1 ^ src2) & src2);
2376 }
2377
2378 static int
2379 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2380 {
2381         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2382
2383         /* If DCB is not supported, only default TC is supported */
2384         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2385                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2386                 return -EINVAL;
2387         }
2388
2389         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2390                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2391                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
2392                             enabled_tcmap);
2393                 return -EINVAL;
2394         }
2395         return I40E_SUCCESS;
2396 }
2397
2398 int
2399 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2400                                 struct i40e_vsi_vlan_pvid_info *info)
2401 {
2402         struct i40e_hw *hw;
2403         struct i40e_vsi_context ctxt;
2404         uint8_t vlan_flags = 0;
2405         int ret;
2406
2407         if (vsi == NULL || info == NULL) {
2408                 PMD_DRV_LOG(ERR, "invalid parameters");
2409                 return I40E_ERR_PARAM;
2410         }
2411
2412         if (info->on) {
2413                 vsi->info.pvid = info->config.pvid;
2414                 /**
2415                  * If insert pvid is enabled, only tagged pkts are
2416                  * allowed to be sent out.
2417                  */
2418                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2419                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2420         } else {
2421                 vsi->info.pvid = 0;
2422                 if (info->config.reject.tagged == 0)
2423                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2424
2425                 if (info->config.reject.untagged == 0)
2426                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2427         }
2428         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2429                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
2430         vsi->info.port_vlan_flags |= vlan_flags;
2431         vsi->info.valid_sections =
2432                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2433         memset(&ctxt, 0, sizeof(ctxt));
2434         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2435         ctxt.seid = vsi->seid;
2436
2437         hw = I40E_VSI_TO_HW(vsi);
2438         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2439         if (ret != I40E_SUCCESS)
2440                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2441
2442         return ret;
2443 }
2444
2445 static int
2446 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2447 {
2448         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2449         int i, ret;
2450         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2451
2452         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2453         if (ret != I40E_SUCCESS)
2454                 return ret;
2455
2456         if (!vsi->seid) {
2457                 PMD_DRV_LOG(ERR, "seid not valid");
2458                 return -EINVAL;
2459         }
2460
2461         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2462         tc_bw_data.tc_valid_bits = enabled_tcmap;
2463         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2464                 tc_bw_data.tc_bw_credits[i] =
2465                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2466
2467         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2468         if (ret != I40E_SUCCESS) {
2469                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2470                 return ret;
2471         }
2472
2473         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2474                                         sizeof(vsi->info.qs_handle));
2475         return I40E_SUCCESS;
2476 }
2477
2478 static int
2479 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2480                                  struct i40e_aqc_vsi_properties_data *info,
2481                                  uint8_t enabled_tcmap)
2482 {
2483         int ret, total_tc = 0, i;
2484         uint16_t qpnum_per_tc, bsf, qp_idx;
2485
2486         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2487         if (ret != I40E_SUCCESS)
2488                 return ret;
2489
2490         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2491                 if (enabled_tcmap & (1 << i))
2492                         total_tc++;
2493         vsi->enabled_tc = enabled_tcmap;
2494
2495         /* Number of queues per enabled TC */
2496         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2497         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2498         bsf = rte_bsf32(qpnum_per_tc);
2499
2500         /* Adjust the queue number to actual queues that can be applied */
2501         vsi->nb_qps = qpnum_per_tc * total_tc;
2502
2503         /**
2504          * Configure TC and queue mapping parameters, for enabled TC,
2505          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2506          * default queue will serve it.
2507          */
2508         qp_idx = 0;
2509         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2510                 if (vsi->enabled_tc & (1 << i)) {
2511                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2512                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2513                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2514                         qp_idx += qpnum_per_tc;
2515                 } else
2516                         info->tc_mapping[i] = 0;
2517         }
2518
2519         /* Associate queue number with VSI */
2520         if (vsi->type == I40E_VSI_SRIOV) {
2521                 info->mapping_flags |=
2522                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2523                 for (i = 0; i < vsi->nb_qps; i++)
2524                         info->queue_mapping[i] =
2525                                 rte_cpu_to_le_16(vsi->base_queue + i);
2526         } else {
2527                 info->mapping_flags |=
2528                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2529                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2530         }
2531         info->valid_sections =
2532                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2533
2534         return I40E_SUCCESS;
2535 }
2536
2537 static int
2538 i40e_veb_release(struct i40e_veb *veb)
2539 {
2540         struct i40e_vsi *vsi;
2541         struct i40e_hw *hw;
2542
2543         if (veb == NULL || veb->associate_vsi == NULL)
2544                 return -EINVAL;
2545
2546         if (!TAILQ_EMPTY(&veb->head)) {
2547                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2548                 return -EACCES;
2549         }
2550
2551         vsi = veb->associate_vsi;
2552         hw = I40E_VSI_TO_HW(vsi);
2553
2554         vsi->uplink_seid = veb->uplink_seid;
2555         i40e_aq_delete_element(hw, veb->seid, NULL);
2556         rte_free(veb);
2557         vsi->veb = NULL;
2558         return I40E_SUCCESS;
2559 }
2560
2561 /* Setup a veb */
2562 static struct i40e_veb *
2563 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2564 {
2565         struct i40e_veb *veb;
2566         int ret;
2567         struct i40e_hw *hw;
2568
2569         if (NULL == pf || vsi == NULL) {
2570                 PMD_DRV_LOG(ERR, "veb setup failed, "
2571                             "associated VSI shouldn't null");
2572                 return NULL;
2573         }
2574         hw = I40E_PF_TO_HW(pf);
2575
2576         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2577         if (!veb) {
2578                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2579                 goto fail;
2580         }
2581
2582         veb->associate_vsi = vsi;
2583         TAILQ_INIT(&veb->head);
2584         veb->uplink_seid = vsi->uplink_seid;
2585
2586         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2587                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2588
2589         if (ret != I40E_SUCCESS) {
2590                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2591                             hw->aq.asq_last_status);
2592                 goto fail;
2593         }
2594
2595         /* get statistics index */
2596         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2597                                 &veb->stats_idx, NULL, NULL, NULL);
2598         if (ret != I40E_SUCCESS) {
2599                 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2600                             hw->aq.asq_last_status);
2601                 goto fail;
2602         }
2603
2604         /* Get VEB bandwidth, to be implemented */
2605         /* Now associated vsi binding to the VEB, set uplink to this VEB */
2606         vsi->uplink_seid = veb->seid;
2607
2608         return veb;
2609 fail:
2610         rte_free(veb);
2611         return NULL;
2612 }
2613
2614 int
2615 i40e_vsi_release(struct i40e_vsi *vsi)
2616 {
2617         struct i40e_pf *pf;
2618         struct i40e_hw *hw;
2619         struct i40e_vsi_list *vsi_list;
2620         int ret;
2621         struct i40e_mac_filter *f;
2622
2623         if (!vsi)
2624                 return I40E_SUCCESS;
2625
2626         pf = I40E_VSI_TO_PF(vsi);
2627         hw = I40E_VSI_TO_HW(vsi);
2628
2629         /* VSI has child to attach, release child first */
2630         if (vsi->veb) {
2631                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2632                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2633                                 return -1;
2634                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2635                 }
2636                 i40e_veb_release(vsi->veb);
2637         }
2638
2639         /* Remove all macvlan filters of the VSI */
2640         i40e_vsi_remove_all_macvlan_filter(vsi);
2641         TAILQ_FOREACH(f, &vsi->mac_list, next)
2642                 rte_free(f);
2643
2644         if (vsi->type != I40E_VSI_MAIN) {
2645                 /* Remove vsi from parent's sibling list */
2646                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2647                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2648                         return I40E_ERR_PARAM;
2649                 }
2650                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2651                                 &vsi->sib_vsi_list, list);
2652
2653                 /* Remove all switch element of the VSI */
2654                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2655                 if (ret != I40E_SUCCESS)
2656                         PMD_DRV_LOG(ERR, "Failed to delete element");
2657         }
2658         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2659
2660         if (vsi->type != I40E_VSI_SRIOV)
2661                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2662         rte_free(vsi);
2663
2664         return I40E_SUCCESS;
2665 }
2666
/**
 * Replace the firmware's default MAC/VLAN filter on the main VSI with a
 * perfect-match filter for the permanent MAC address.
 *
 * Some firmware configurations install a default filter matching the
 * permanent MAC with any VLAN; removing it and adding a perfect-match
 * filter instead prevents unwanted reception of tagged packets.
 *
 * Returns I40E_SUCCESS on success, I40E_ERR_CONFIG for non-main VSIs,
 * I40E_ERR_NO_MEMORY, or the admin-queue/add-filter error otherwise.
 */
static int
i40e_update_default_filter_setting(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_remove_macvlan_element_data def_filter;
	struct i40e_mac_filter_info filter;
	int ret;

	/* Only the main (PF) VSI carries the firmware default filter. */
	if (vsi->type != I40E_VSI_MAIN)
		return I40E_ERR_CONFIG;
	memset(&def_filter, 0, sizeof(def_filter));
	(void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
					ETH_ADDR_LEN);
	def_filter.vlan_tag = 0;
	/* IGNORE_VLAN: target the default filter, which matches the
	 * permanent MAC regardless of VLAN tag.
	 */
	def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
				I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
	if (ret != I40E_SUCCESS) {
		struct i40e_mac_filter *f;
		struct ether_addr *mac;

		PMD_DRV_LOG(WARNING, "Cannot remove the default "
			    "macvlan filter");
		/* It needs to add the permanent mac into mac list */
		f = rte_zmalloc("macv_filter", sizeof(*f), 0);
		if (f == NULL) {
			PMD_DRV_LOG(ERR, "failed to allocate memory");
			return I40E_ERR_NO_MEMORY;
		}
		mac = &f->mac_info.mac_addr;
		(void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
				ETH_ADDR_LEN);
		f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
		TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
		vsi->mac_num++;

		/* Removal failed (e.g. newer firmware keeps the default
		 * filter); the software list was still updated, and the
		 * original error is reported to the caller.
		 */
		return ret;
	}
	/* NOTE(review): 'filter' is only partially initialized (no
	 * memset); presumably i40e_vsi_add_mac reads just mac_addr and
	 * filter_type for a perfect match -- confirm.
	 */
	(void)rte_memcpy(&filter.mac_addr,
		(struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
	filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
	return i40e_vsi_add_mac(vsi, &filter);
}
2710
/**
 * Query the VSI bandwidth and per-TC ETS SLA configuration via the
 * admin queue and log the values. Nothing is stored on the VSI yet.
 *
 * Returns 0 on success, or the admin-queue error code on failure.
 */
static int
i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
	struct i40e_hw *hw = &vsi->adapter->hw;
	i40e_status ret;
	int i;

	/* Overall VSI bandwidth limits */
	memset(&bw_config, 0, sizeof(bw_config));
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
			    hw->aq.asq_last_status);
		return ret;
	}

	/* Per-TC ETS SLA (share/credit) configuration */
	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
					&ets_sla_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
			    "configuration %u", hw->aq.asq_last_status);
		return ret;
	}

	/* Not store the info yet, just print out */
	PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
	PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
			    ets_sla_config.share_credits[i]);
		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
		/* NOTE(review): the "max credits" extraction shifts a
		 * 16-bit little-endian word by up to (7 * 4) = 28 bits,
		 * which yields 0 for the upper TCs; verify the intended
		 * layout of ets_sla_config.credits before trusting this
		 * log line.
		 */
		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
			    (i * 4));
	}

	return 0;
}
2752
2753 /* Setup a VSI */
2754 struct i40e_vsi *
2755 i40e_vsi_setup(struct i40e_pf *pf,
2756                enum i40e_vsi_type type,
2757                struct i40e_vsi *uplink_vsi,
2758                uint16_t user_param)
2759 {
2760         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2761         struct i40e_vsi *vsi;
2762         struct i40e_mac_filter_info filter;
2763         int ret;
2764         struct i40e_vsi_context ctxt;
2765         struct ether_addr broadcast =
2766                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2767
2768         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2769                 PMD_DRV_LOG(ERR, "VSI setup failed, "
2770                             "VSI link shouldn't be NULL");
2771                 return NULL;
2772         }
2773
2774         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2775                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2776                             "uplink VSI should be NULL");
2777                 return NULL;
2778         }
2779
2780         /* If uplink vsi didn't setup VEB, create one first */
2781         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2782                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2783
2784                 if (NULL == uplink_vsi->veb) {
2785                         PMD_DRV_LOG(ERR, "VEB setup failed");
2786                         return NULL;
2787                 }
2788         }
2789
2790         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2791         if (!vsi) {
2792                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2793                 return NULL;
2794         }
2795         TAILQ_INIT(&vsi->mac_list);
2796         vsi->type = type;
2797         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2798         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2799         vsi->parent_vsi = uplink_vsi;
2800         vsi->user_param = user_param;
2801         /* Allocate queues */
2802         switch (vsi->type) {
2803         case I40E_VSI_MAIN  :
2804                 vsi->nb_qps = pf->lan_nb_qps;
2805                 break;
2806         case I40E_VSI_SRIOV :
2807                 vsi->nb_qps = pf->vf_nb_qps;
2808                 break;
2809         case I40E_VSI_VMDQ2:
2810                 vsi->nb_qps = pf->vmdq_nb_qps;
2811                 break;
2812         default:
2813                 goto fail_mem;
2814         }
2815         ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2816         if (ret < 0) {
2817                 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2818                                 vsi->seid, ret);
2819                 goto fail_mem;
2820         }
2821         vsi->base_queue = ret;
2822
2823         /* VF has MSIX interrupt in VF range, don't allocate here */
2824         if (type != I40E_VSI_SRIOV) {
2825                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2826                 if (ret < 0) {
2827                         PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2828                         goto fail_queue_alloc;
2829                 }
2830                 vsi->msix_intr = ret;
2831         } else
2832                 vsi->msix_intr = 0;
2833         /* Add VSI */
2834         if (type == I40E_VSI_MAIN) {
2835                 /* For main VSI, no need to add since it's default one */
2836                 vsi->uplink_seid = pf->mac_seid;
2837                 vsi->seid = pf->main_vsi_seid;
2838                 /* Bind queues with specific MSIX interrupt */
2839                 /**
2840                  * Needs 2 interrupt at least, one for misc cause which will
2841                  * enabled from OS side, Another for queues binding the
2842                  * interrupt from device side only.
2843                  */
2844
2845                 /* Get default VSI parameters from hardware */
2846                 memset(&ctxt, 0, sizeof(ctxt));
2847                 ctxt.seid = vsi->seid;
2848                 ctxt.pf_num = hw->pf_id;
2849                 ctxt.uplink_seid = vsi->uplink_seid;
2850                 ctxt.vf_num = 0;
2851                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2852                 if (ret != I40E_SUCCESS) {
2853                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
2854                         goto fail_msix_alloc;
2855                 }
2856                 (void)rte_memcpy(&vsi->info, &ctxt.info,
2857                         sizeof(struct i40e_aqc_vsi_properties_data));
2858                 vsi->vsi_id = ctxt.vsi_number;
2859                 vsi->info.valid_sections = 0;
2860
2861                 /* Configure tc, enabled TC0 only */
2862                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2863                         I40E_SUCCESS) {
2864                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
2865                         goto fail_msix_alloc;
2866                 }
2867
2868                 /* TC, queue mapping */
2869                 memset(&ctxt, 0, sizeof(ctxt));
2870                 vsi->info.valid_sections |=
2871                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2872                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2873                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2874                 (void)rte_memcpy(&ctxt.info, &vsi->info,
2875                         sizeof(struct i40e_aqc_vsi_properties_data));
2876                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2877                                                 I40E_DEFAULT_TCMAP);
2878                 if (ret != I40E_SUCCESS) {
2879                         PMD_DRV_LOG(ERR, "Failed to configure "
2880                                     "TC queue mapping");
2881                         goto fail_msix_alloc;
2882                 }
2883                 ctxt.seid = vsi->seid;
2884                 ctxt.pf_num = hw->pf_id;
2885                 ctxt.uplink_seid = vsi->uplink_seid;
2886                 ctxt.vf_num = 0;
2887
2888                 /* Update VSI parameters */
2889                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2890                 if (ret != I40E_SUCCESS) {
2891                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
2892                         goto fail_msix_alloc;
2893                 }
2894
2895                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2896                                                 sizeof(vsi->info.tc_mapping));
2897                 (void)rte_memcpy(&vsi->info.queue_mapping,
2898                                 &ctxt.info.queue_mapping,
2899                         sizeof(vsi->info.queue_mapping));
2900                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2901                 vsi->info.valid_sections = 0;
2902
2903                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2904                                 ETH_ADDR_LEN);
2905
2906                 /**
2907                  * Updating default filter settings are necessary to prevent
2908                  * reception of tagged packets.
2909                  * Some old firmware configurations load a default macvlan
2910                  * filter which accepts both tagged and untagged packets.
2911                  * The updating is to use a normal filter instead if needed.
2912                  * For NVM 4.2.2 or after, the updating is not needed anymore.
2913                  * The firmware with correct configurations load the default
2914                  * macvlan filter which is expected and cannot be removed.
2915                  */
2916                 i40e_update_default_filter_setting(vsi);
2917         } else if (type == I40E_VSI_SRIOV) {
2918                 memset(&ctxt, 0, sizeof(ctxt));
2919                 /**
2920                  * For other VSI, the uplink_seid equals to uplink VSI's
2921                  * uplink_seid since they share same VEB
2922                  */
2923                 vsi->uplink_seid = uplink_vsi->uplink_seid;
2924                 ctxt.pf_num = hw->pf_id;
2925                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2926                 ctxt.uplink_seid = vsi->uplink_seid;
2927                 ctxt.connection_type = 0x1;
2928                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2929
2930                 /* Configure switch ID */
2931                 ctxt.info.valid_sections |=
2932                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2933                 ctxt.info.switch_id =
2934                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2935                 /* Configure port/vlan */
2936                 ctxt.info.valid_sections |=
2937                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2938                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2939                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2940                                                 I40E_DEFAULT_TCMAP);
2941                 if (ret != I40E_SUCCESS) {
2942                         PMD_DRV_LOG(ERR, "Failed to configure "
2943                                     "TC queue mapping");
2944                         goto fail_msix_alloc;
2945                 }
2946                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2947                 ctxt.info.valid_sections |=
2948                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2949                 /**
2950                  * Since VSI is not created yet, only configure parameter,
2951                  * will add vsi below.
2952                  */
2953         } else if (type == I40E_VSI_VMDQ2) {
2954                 memset(&ctxt, 0, sizeof(ctxt));
2955                 /*
2956                  * For other VSI, the uplink_seid equals to uplink VSI's
2957                  * uplink_seid since they share same VEB
2958                  */
2959                 vsi->uplink_seid = uplink_vsi->uplink_seid;
2960                 ctxt.pf_num = hw->pf_id;
2961                 ctxt.vf_num = 0;
2962                 ctxt.uplink_seid = vsi->uplink_seid;
2963                 ctxt.connection_type = 0x1;
2964                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
2965
2966                 ctxt.info.valid_sections |=
2967                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2968                 /* user_param carries flag to enable loop back */
2969                 if (user_param) {
2970                         ctxt.info.switch_id =
2971                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
2972                         ctxt.info.switch_id |=
2973                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2974                 }
2975
2976                 /* Configure port/vlan */
2977                 ctxt.info.valid_sections |=
2978                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2979                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2980                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2981                                                 I40E_DEFAULT_TCMAP);
2982                 if (ret != I40E_SUCCESS) {
2983                         PMD_DRV_LOG(ERR, "Failed to configure "
2984                                         "TC queue mapping");
2985                         goto fail_msix_alloc;
2986                 }
2987                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2988                 ctxt.info.valid_sections |=
2989                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2990         } else {
2991                 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
2992                 goto fail_msix_alloc;
2993         }
2994
2995         if (vsi->type != I40E_VSI_MAIN) {
2996                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2997                 if (ret) {
2998                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
2999                                     hw->aq.asq_last_status);
3000                         goto fail_msix_alloc;
3001                 }
3002                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3003                 vsi->info.valid_sections = 0;
3004                 vsi->seid = ctxt.seid;
3005                 vsi->vsi_id = ctxt.vsi_number;
3006                 vsi->sib_vsi_list.vsi = vsi;
3007                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3008                                 &vsi->sib_vsi_list, list);
3009         }
3010
3011         /* MAC/VLAN configuration */
3012         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3013         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3014
3015         ret = i40e_vsi_add_mac(vsi, &filter);
3016         if (ret != I40E_SUCCESS) {
3017                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3018                 goto fail_msix_alloc;
3019         }
3020
3021         /* Get VSI BW information */
3022         i40e_vsi_dump_bw_config(vsi);
3023         return vsi;
3024 fail_msix_alloc:
3025         i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
3026 fail_queue_alloc:
3027         i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
3028 fail_mem:
3029         rte_free(vsi);
3030         return NULL;
3031 }
3032
/* Configure vlan stripping on or off
 *
 * Programs the VSI's port_vlan EMOD (egress/strip mode) bits through the
 * admin queue. Returns I40E_SUCCESS, or the AQ error code on failure.
 * Note the hardware encoding: EMOD bits cleared (== 0) means stripping is
 * ON (strip both tags), EMOD_MASK fully set means stripping is OFF.
 */
int
i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
        struct i40e_vsi_context ctxt;
        uint8_t vlan_flags;
        int ret = I40E_SUCCESS;

        /* Check if it has been already on or off; if the VLAN section of the
         * cached VSI info is valid, skip the AQ round-trip when the requested
         * state already matches the cached EMOD bits. */
        if (vsi->info.valid_sections &
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
                if (on) {
                        if ((vsi->info.port_vlan_flags &
                                I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
                                return 0; /* already on */
                } else {
                        if ((vsi->info.port_vlan_flags &
                                I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
                                I40E_AQ_VSI_PVLAN_EMOD_MASK)
                                return 0; /* already off */
                }
        }

        if (on)
                vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
        else
                vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
        /* Update the cached VSI info first, then push it to hardware */
        vsi->info.valid_sections =
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
        vsi->info.port_vlan_flags |= vlan_flags;
        /* NOTE(review): ctxt is stack-allocated and only seid/info are set;
         * this relies on i40e_aq_update_vsi_params() reading nothing else
         * from the context — confirm against the shared AQ code. */
        ctxt.seid = vsi->seid;
        (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret)
                PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
                            on ? "enable" : "disable");

        return ret;
}
3074
3075 static int
3076 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3077 {
3078         struct rte_eth_dev_data *data = dev->data;
3079         int ret;
3080
3081         /* Apply vlan offload setting */
3082         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3083
3084         /* Apply double-vlan setting, not implemented yet */
3085
3086         /* Apply pvid setting */
3087         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3088                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
3089         if (ret)
3090                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3091
3092         return ret;
3093 }
3094
3095 static int
3096 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3097 {
3098         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3099
3100         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3101 }
3102
/*
 * Sync the PF flow-control registers with the result of link
 * auto-negotiation. If link info cannot be read or auto-negotiation has
 * not completed, execution falls through to write_reg with rxfc/txfc
 * still 0, which disables flow control in hardware.
 */
static int
i40e_update_flow_control(struct i40e_hw *hw)
{
#define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
        struct i40e_link_status link_status;
        uint32_t rxfc = 0, txfc = 0, reg;
        uint8_t an_info;
        int ret;

        memset(&link_status, 0, sizeof(link_status));
        ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to get link status information");
                goto write_reg; /* Disable flow control */
        }

        /* NOTE(review): an_info is taken from hw->phy.link_info (refreshed
         * as a side effect of the AQ call above), not from the local
         * link_status copy — confirm this is intentional. */
        an_info = hw->phy.link_info.an_info;
        if (!(an_info & I40E_AQ_AN_COMPLETED)) {
                PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
                ret = I40E_ERR_NOT_READY;
                goto write_reg; /* Disable flow control */
        }
        /**
         * If link auto negotiation is enabled, flow control needs to
         * be configured according to it
         */
        switch (an_info & I40E_LINK_PAUSE_RXTX) {
        case I40E_LINK_PAUSE_RXTX:
                rxfc = 1;
                txfc = 1;
                hw->fc.current_mode = I40E_FC_FULL;
                break;
        case I40E_AQ_LINK_PAUSE_RX:
                rxfc = 1;
                hw->fc.current_mode = I40E_FC_RX_PAUSE;
                break;
        case I40E_AQ_LINK_PAUSE_TX:
                txfc = 1;
                hw->fc.current_mode = I40E_FC_TX_PAUSE;
                break;
        default:
                hw->fc.current_mode = I40E_FC_NONE;
                break;
        }

write_reg:
        /* Program TX pause enable directly, then read-modify-write the
         * RX pause enable bit of MFLCN to preserve the other fields. */
        I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
                txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
        reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
        reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
        reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
        I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);

        return ret;
}
3158
3159 /* PF setup */
3160 static int
3161 i40e_pf_setup(struct i40e_pf *pf)
3162 {
3163         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3164         struct i40e_filter_control_settings settings;
3165         struct i40e_vsi *vsi;
3166         int ret;
3167
3168         /* Clear all stats counters */
3169         pf->offset_loaded = FALSE;
3170         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3171         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3172
3173         ret = i40e_pf_get_switch_config(pf);
3174         if (ret != I40E_SUCCESS) {
3175                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3176                 return ret;
3177         }
3178
3179         /* VSI setup */
3180         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3181         if (!vsi) {
3182                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3183                 return I40E_ERR_NOT_READY;
3184         }
3185         pf->main_vsi = vsi;
3186
3187         /* Configure filter control */
3188         memset(&settings, 0, sizeof(settings));
3189         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3190                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3191         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3192                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3193         else {
3194                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
3195                                                 hw->func_caps.rss_table_size);
3196                 return I40E_ERR_PARAM;
3197         }
3198         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3199                         "size: %u\n", hw->func_caps.rss_table_size);
3200         pf->hash_lut_size = hw->func_caps.rss_table_size;
3201
3202         /* Enable ethtype and macvlan filters */
3203         settings.enable_ethtype = TRUE;
3204         settings.enable_macvlan = TRUE;
3205         ret = i40e_set_filter_control(hw, &settings);
3206         if (ret)
3207                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3208                                                                 ret);
3209
3210         /* Update flow control according to the auto negotiation */
3211         i40e_update_flow_control(hw);
3212
3213         return I40E_SUCCESS;
3214 }
3215
/*
 * Enable or disable a single TX queue.
 *
 * Performs the hardware handshake on QTX_ENA: the QENA_REQ bit requests a
 * state change and hardware acknowledges by updating QENA_STAT. The function
 * polls until REQ and STAT agree before issuing a new request, then polls
 * again for completion. Returns I40E_SUCCESS or I40E_ERR_TIMEOUT.
 */
int
i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
        uint32_t reg;
        uint16_t j;

        /**
         * Set or clear TX Queue Disable flags,
         * which is required by hardware.
         */
        i40e_pre_tx_queue_cfg(hw, q_idx, on);
        rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);

        /* Wait until the request is finished */
        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
                reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
                /* Done when REQ and STAT bits agree (XOR == 0) */
                if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
                        ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
                                                        & 0x1))) {
                        break;
                }
        }
        if (on) {
                if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
                        return I40E_SUCCESS; /* already on, skip next steps */

                /* Reset the queue head pointer before (re)enabling */
                I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
                reg |= I40E_QTX_ENA_QENA_REQ_MASK;
        } else {
                if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
                        return I40E_SUCCESS; /* already off, skip next steps */
                reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
        }
        /* Write the register */
        I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
        /* Check the result: wait for STAT to match the requested state */
        for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
                rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
                reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
                if (on) {
                        if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
                                (reg & I40E_QTX_ENA_QENA_STAT_MASK))
                                break;
                } else {
                        if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
                                !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
                                break;
                }
        }
        /* Check if it is timeout */
        if (j >= I40E_CHK_Q_ENA_COUNT) {
                PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
                            (on ? "enable" : "disable"), q_idx);
                return I40E_ERR_TIMEOUT;
        }

        return I40E_SUCCESS;
}
3275
3276 /* Swith on or off the tx queues */
3277 static int
3278 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3279 {
3280         struct rte_eth_dev_data *dev_data = pf->dev_data;
3281         struct i40e_tx_queue *txq;
3282         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3283         uint16_t i;
3284         int ret;
3285
3286         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3287                 txq = dev_data->tx_queues[i];
3288                 /* Don't operate the queue if not configured or
3289                  * if starting only per queue */
3290                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3291                         continue;
3292                 if (on)
3293                         ret = i40e_dev_tx_queue_start(dev, i);
3294                 else
3295                         ret = i40e_dev_tx_queue_stop(dev, i);
3296                 if ( ret != I40E_SUCCESS)
3297                         return ret;
3298         }
3299
3300         return I40E_SUCCESS;
3301 }
3302
3303 int
3304 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3305 {
3306         uint32_t reg;
3307         uint16_t j;
3308
3309         /* Wait until the request is finished */
3310         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3311                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3312                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3313                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3314                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3315                         break;
3316         }
3317
3318         if (on) {
3319                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3320                         return I40E_SUCCESS; /* Already on, skip next steps */
3321                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3322         } else {
3323                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3324                         return I40E_SUCCESS; /* Already off, skip next steps */
3325                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3326         }
3327
3328         /* Write the register */
3329         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3330         /* Check the result */
3331         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3332                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3333                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3334                 if (on) {
3335                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3336                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3337                                 break;
3338                 } else {
3339                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3340                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3341                                 break;
3342                 }
3343         }
3344
3345         /* Check if it is timeout */
3346         if (j >= I40E_CHK_Q_ENA_COUNT) {
3347                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3348                             (on ? "enable" : "disable"), q_idx);
3349                 return I40E_ERR_TIMEOUT;
3350         }
3351
3352         return I40E_SUCCESS;
3353 }
3354 /* Switch on or off the rx queues */
3355 static int
3356 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3357 {
3358         struct rte_eth_dev_data *dev_data = pf->dev_data;
3359         struct i40e_rx_queue *rxq;
3360         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3361         uint16_t i;
3362         int ret;
3363
3364         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3365                 rxq = dev_data->rx_queues[i];
3366                 /* Don't operate the queue if not configured or
3367                  * if starting only per queue */
3368                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3369                         continue;
3370                 if (on)
3371                         ret = i40e_dev_rx_queue_start(dev, i);
3372                 else
3373                         ret = i40e_dev_rx_queue_stop(dev, i);
3374                 if (ret != I40E_SUCCESS)
3375                         return ret;
3376         }
3377
3378         return I40E_SUCCESS;
3379 }
3380
3381 /* Switch on or off all the rx/tx queues */
3382 int
3383 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3384 {
3385         int ret;
3386
3387         if (on) {
3388                 /* enable rx queues before enabling tx queues */
3389                 ret = i40e_dev_switch_rx_queues(pf, on);
3390                 if (ret) {
3391                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3392                         return ret;
3393                 }
3394                 ret = i40e_dev_switch_tx_queues(pf, on);
3395         } else {
3396                 /* Stop tx queues before stopping rx queues */
3397                 ret = i40e_dev_switch_tx_queues(pf, on);
3398                 if (ret) {
3399                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3400                         return ret;
3401                 }
3402                 ret = i40e_dev_switch_rx_queues(pf, on);
3403         }
3404
3405         return ret;
3406 }
3407
3408 /* Initialize VSI for TX */
3409 static int
3410 i40e_dev_tx_init(struct i40e_pf *pf)
3411 {
3412         struct rte_eth_dev_data *data = pf->dev_data;
3413         uint16_t i;
3414         uint32_t ret = I40E_SUCCESS;
3415         struct i40e_tx_queue *txq;
3416
3417         for (i = 0; i < data->nb_tx_queues; i++) {
3418                 txq = data->tx_queues[i];
3419                 if (!txq || !txq->q_set)
3420                         continue;
3421                 ret = i40e_tx_queue_init(txq);
3422                 if (ret != I40E_SUCCESS)
3423                         break;
3424         }
3425
3426         return ret;
3427 }
3428
3429 /* Initialize VSI for RX */
3430 static int
3431 i40e_dev_rx_init(struct i40e_pf *pf)
3432 {
3433         struct rte_eth_dev_data *data = pf->dev_data;
3434         int ret = I40E_SUCCESS;
3435         uint16_t i;
3436         struct i40e_rx_queue *rxq;
3437
3438         i40e_pf_config_mq_rx(pf);
3439         for (i = 0; i < data->nb_rx_queues; i++) {
3440                 rxq = data->rx_queues[i];
3441                 if (!rxq || !rxq->q_set)
3442                         continue;
3443
3444                 ret = i40e_rx_queue_init(rxq);
3445                 if (ret != I40E_SUCCESS) {
3446                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3447                                     "initialization");
3448                         break;
3449                 }
3450         }
3451
3452         return ret;
3453 }
3454
3455 static int
3456 i40e_dev_rxtx_init(struct i40e_pf *pf)
3457 {
3458         int err;
3459
3460         err = i40e_dev_tx_init(pf);
3461         if (err) {
3462                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3463                 return err;
3464         }
3465         err = i40e_dev_rx_init(pf);
3466         if (err) {
3467                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3468                 return err;
3469         }
3470
3471         return err;
3472 }
3473
3474 static int
3475 i40e_vmdq_setup(struct rte_eth_dev *dev)
3476 {
3477         struct rte_eth_conf *conf = &dev->data->dev_conf;
3478         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3479         int i, err, conf_vsis, j, loop;
3480         struct i40e_vsi *vsi;
3481         struct i40e_vmdq_info *vmdq_info;
3482         struct rte_eth_vmdq_rx_conf *vmdq_conf;
3483         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3484
3485         /*
3486          * Disable interrupt to avoid message from VF. Furthermore, it will
3487          * avoid race condition in VSI creation/destroy.
3488          */
3489         i40e_pf_disable_irq0(hw);
3490
3491         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3492                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3493                 return -ENOTSUP;
3494         }
3495
3496         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3497         if (conf_vsis > pf->max_nb_vmdq_vsi) {
3498                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3499                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3500                         pf->max_nb_vmdq_vsi);
3501                 return -ENOTSUP;
3502         }
3503
3504         if (pf->vmdq != NULL) {
3505                 PMD_INIT_LOG(INFO, "VMDQ already configured");
3506                 return 0;
3507         }
3508
3509         pf->vmdq = rte_zmalloc("vmdq_info_struct",
3510                                 sizeof(*vmdq_info) * conf_vsis, 0);
3511
3512         if (pf->vmdq == NULL) {
3513                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3514                 return -ENOMEM;
3515         }
3516
3517         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3518
3519         /* Create VMDQ VSI */
3520         for (i = 0; i < conf_vsis; i++) {
3521                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3522                                 vmdq_conf->enable_loop_back);
3523                 if (vsi == NULL) {
3524                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3525                         err = -1;
3526                         goto err_vsi_setup;
3527                 }
3528                 vmdq_info = &pf->vmdq[i];
3529                 vmdq_info->pf = pf;
3530                 vmdq_info->vsi = vsi;
3531         }
3532         pf->nb_cfg_vmdq_vsi = conf_vsis;
3533
3534         /* Configure Vlan */
3535         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3536         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3537                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3538                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3539                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3540                                         vmdq_conf->pool_map[i].vlan_id, j);
3541
3542                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3543                                                 vmdq_conf->pool_map[i].vlan_id);
3544                                 if (err) {
3545                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
3546                                         err = -1;
3547                                         goto err_vsi_setup;
3548                                 }
3549                         }
3550                 }
3551         }
3552
3553         i40e_pf_enable_irq0(hw);
3554
3555         return 0;
3556
3557 err_vsi_setup:
3558         for (i = 0; i < conf_vsis; i++)
3559                 if (pf->vmdq[i].vsi == NULL)
3560                         break;
3561                 else
3562                         i40e_vsi_release(pf->vmdq[i].vsi);
3563
3564         rte_free(pf->vmdq);
3565         pf->vmdq = NULL;
3566         i40e_pf_enable_irq0(hw);
3567         return err;
3568 }
3569
3570 static void
3571 i40e_stat_update_32(struct i40e_hw *hw,
3572                    uint32_t reg,
3573                    bool offset_loaded,
3574                    uint64_t *offset,
3575                    uint64_t *stat)
3576 {
3577         uint64_t new_data;
3578
3579         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3580         if (!offset_loaded)
3581                 *offset = new_data;
3582
3583         if (new_data >= *offset)
3584                 *stat = (uint64_t)(new_data - *offset);
3585         else
3586                 *stat = (uint64_t)((new_data +
3587                         ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3588 }
3589
3590 static void
3591 i40e_stat_update_48(struct i40e_hw *hw,
3592                    uint32_t hireg,
3593                    uint32_t loreg,
3594                    bool offset_loaded,
3595                    uint64_t *offset,
3596                    uint64_t *stat)
3597 {
3598         uint64_t new_data;
3599
3600         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3601         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3602                         I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3603
3604         if (!offset_loaded)
3605                 *offset = new_data;
3606
3607         if (new_data >= *offset)
3608                 *stat = new_data - *offset;
3609         else
3610                 *stat = (uint64_t)((new_data +
3611                         ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3612
3613         *stat &= I40E_48_BIT_MASK;
3614 }
3615
/* Disable IRQ0: clear PFINT_DYN_CTL0 (INTENA off) and flush the write so
 * the hardware sees it before the caller proceeds. */
void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
        /* Disable all interrupt types */
        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
        I40E_WRITE_FLUSH(hw);
}
3624
/* Enable IRQ0: set interrupt enable, clear any pending-bit-array state,
 * and select the no-ITR index; flush so the write takes effect. */
void
i40e_pf_enable_irq0(struct i40e_hw *hw)
{
        I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
                I40E_PFINT_DYN_CTL0_INTENA_MASK |
                I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
                I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
        I40E_WRITE_FLUSH(hw);
}
3635
/* One-time IRQ0 configuration: enable all ICR0 causes and detach the
 * queue interrupt list so no queue interrupts are routed to IRQ0. */
static void
i40e_pf_config_irq0(struct i40e_hw *hw)
{
        /* read pending request and disable first */
        i40e_pf_disable_irq0(hw);
        I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
        I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
                I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);

        /* Link no queues with irq0 */
        I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
                I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
3649
3650 static void
3651 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3652 {
3653         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3654         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3655         int i;
3656         uint16_t abs_vf_id;
3657         uint32_t index, offset, val;
3658
3659         if (!pf->vfs)
3660                 return;
3661         /**
3662          * Try to find which VF trigger a reset, use absolute VF id to access
3663          * since the reg is global register.
3664          */
3665         for (i = 0; i < pf->vf_num; i++) {
3666                 abs_vf_id = hw->func_caps.vf_base_id + i;
3667                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3668                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3669                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3670                 /* VFR event occured */
3671                 if (val & (0x1 << offset)) {
3672                         int ret;
3673
3674                         /* Clear the event first */
3675                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3676                                                         (0x1 << offset));
3677                         PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3678                         /**
3679                          * Only notify a VF reset event occured,
3680                          * don't trigger another SW reset
3681                          */
3682                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3683                         if (ret != I40E_SUCCESS)
3684                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3685                 }
3686         }
3687 }
3688
3689 static void
3690 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3691 {
3692         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3693         struct i40e_arq_event_info info;
3694         uint16_t pending, opcode;
3695         int ret;
3696
3697         info.buf_len = I40E_AQ_BUF_SZ;
3698         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3699         if (!info.msg_buf) {
3700                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3701                 return;
3702         }
3703
3704         pending = 1;
3705         while (pending) {
3706                 ret = i40e_clean_arq_element(hw, &info, &pending);
3707
3708                 if (ret != I40E_SUCCESS) {
3709                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3710                                     "aq_err: %u", hw->aq.asq_last_status);
3711                         break;
3712                 }
3713                 opcode = rte_le_to_cpu_16(info.desc.opcode);
3714
3715                 switch (opcode) {
3716                 case i40e_aqc_opc_send_msg_to_pf:
3717                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
3718                         i40e_pf_host_handle_vf_msg(dev,
3719                                         rte_le_to_cpu_16(info.desc.retval),
3720                                         rte_le_to_cpu_32(info.desc.cookie_high),
3721                                         rte_le_to_cpu_32(info.desc.cookie_low),
3722                                         info.msg_buf,
3723                                         info.msg_len);
3724                         break;
3725                 default:
3726                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3727                                     opcode);
3728                         break;
3729                 }
3730         }
3731         rte_free(info.msg_buf);
3732 }
3733
3734 /*
3735  * Interrupt handler is registered as the alarm callback for handling LSC
3736  * interrupt in a definite of time, in order to wait the NIC into a stable
3737  * state. Currently it waits 1 sec in i40e for the link up interrupt, and
3738  * no need for link down interrupt.
3739  */
3740 static void
3741 i40e_dev_interrupt_delayed_handler(void *param)
3742 {
3743         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3744         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3745         uint32_t icr0;
3746
3747         /* read interrupt causes again */
3748         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3749
3750 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3751         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3752                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
3753         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3754                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
3755         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3756                 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
3757         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3758                 PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
3759         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3760                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3761                                                                 "state\n");
3762         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3763                 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
3764         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3765                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
3766 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3767
3768         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3769                 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3770                 i40e_dev_handle_vfr_event(dev);
3771         }
3772         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3773                 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3774                 i40e_dev_handle_aq_msg(dev);
3775         }
3776
3777         /* handle the link up interrupt in an alarm callback */
3778         i40e_dev_link_update(dev, 0);
3779         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3780
3781         i40e_pf_enable_irq0(hw);
3782         rte_intr_enable(&(dev->pci_dev->intr_handle));
3783 }
3784
3785 /**
3786  * Interrupt handler triggered by NIC  for handling
3787  * specific interrupt.
3788  *
3789  * @param handle
3790  *  Pointer to interrupt handle.
3791  * @param param
3792  *  The address of parameter (struct rte_eth_dev *) regsitered before.
3793  *
3794  * @return
3795  *  void
3796  */
3797 static void
3798 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3799                            void *param)
3800 {
3801         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3802         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3803         uint32_t icr0;
3804
3805         /* Disable interrupt */
3806         i40e_pf_disable_irq0(hw);
3807
3808         /* read out interrupt causes */
3809         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3810
3811         /* No interrupt event indicated */
3812         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3813                 PMD_DRV_LOG(INFO, "No interrupt event");
3814                 goto done;
3815         }
3816 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3817         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3818                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3819         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3820                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3821         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3822                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3823         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3824                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3825         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3826                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
3827         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3828                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
3829         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3830                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
3831 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3832
3833         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3834                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
3835                 i40e_dev_handle_vfr_event(dev);
3836         }
3837         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3838                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
3839                 i40e_dev_handle_aq_msg(dev);
3840         }
3841
3842         /* Link Status Change interrupt */
3843         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3844 #define I40E_US_PER_SECOND 1000000
3845                 struct rte_eth_link link;
3846
3847                 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
3848                 memset(&link, 0, sizeof(link));
3849                 rte_i40e_dev_atomic_read_link_status(dev, &link);
3850                 i40e_dev_link_update(dev, 0);
3851
3852                 /*
3853                  * For link up interrupt, it needs to wait 1 second to let the
3854                  * hardware be a stable state. Otherwise several consecutive
3855                  * interrupts can be observed.
3856                  * For link down interrupt, no need to wait.
3857                  */
3858                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
3859                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
3860                         return;
3861                 else
3862                         _rte_eth_dev_callback_process(dev,
3863                                 RTE_ETH_EVENT_INTR_LSC);
3864         }
3865
3866 done:
3867         /* Enable interrupt */
3868         i40e_pf_enable_irq0(hw);
3869         rte_intr_enable(&(dev->pci_dev->intr_handle));
3870 }
3871
3872 static int
3873 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3874                          struct i40e_macvlan_filter *filter,
3875                          int total)
3876 {
3877         int ele_num, ele_buff_size;
3878         int num, actual_num, i;
3879         uint16_t flags;
3880         int ret = I40E_SUCCESS;
3881         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3882         struct i40e_aqc_add_macvlan_element_data *req_list;
3883
3884         if (filter == NULL  || total == 0)
3885                 return I40E_ERR_PARAM;
3886         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3887         ele_buff_size = hw->aq.asq_buf_size;
3888
3889         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3890         if (req_list == NULL) {
3891                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3892                 return I40E_ERR_NO_MEMORY;
3893         }
3894
3895         num = 0;
3896         do {
3897                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3898                 memset(req_list, 0, ele_buff_size);
3899
3900                 for (i = 0; i < actual_num; i++) {
3901                         (void)rte_memcpy(req_list[i].mac_addr,
3902                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
3903                         req_list[i].vlan_tag =
3904                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
3905
3906                         switch (filter[num + i].filter_type) {
3907                         case RTE_MAC_PERFECT_MATCH:
3908                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
3909                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3910                                 break;
3911                         case RTE_MACVLAN_PERFECT_MATCH:
3912                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3913                                 break;
3914                         case RTE_MAC_HASH_MATCH:
3915                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
3916                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3917                                 break;
3918                         case RTE_MACVLAN_HASH_MATCH:
3919                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
3920                                 break;
3921                         default:
3922                                 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
3923                                 ret = I40E_ERR_PARAM;
3924                                 goto DONE;
3925                         }
3926
3927                         req_list[i].queue_number = 0;
3928
3929                         req_list[i].flags = rte_cpu_to_le_16(flags);
3930                 }
3931
3932                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3933                                                 actual_num, NULL);
3934                 if (ret != I40E_SUCCESS) {
3935                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3936                         goto DONE;
3937                 }
3938                 num += actual_num;
3939         } while (num < total);
3940
3941 DONE:
3942         rte_free(req_list);
3943         return ret;
3944 }
3945
3946 static int
3947 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3948                             struct i40e_macvlan_filter *filter,
3949                             int total)
3950 {
3951         int ele_num, ele_buff_size;
3952         int num, actual_num, i;
3953         uint16_t flags;
3954         int ret = I40E_SUCCESS;
3955         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3956         struct i40e_aqc_remove_macvlan_element_data *req_list;
3957
3958         if (filter == NULL  || total == 0)
3959                 return I40E_ERR_PARAM;
3960
3961         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3962         ele_buff_size = hw->aq.asq_buf_size;
3963
3964         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3965         if (req_list == NULL) {
3966                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3967                 return I40E_ERR_NO_MEMORY;
3968         }
3969
3970         num = 0;
3971         do {
3972                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3973                 memset(req_list, 0, ele_buff_size);
3974
3975                 for (i = 0; i < actual_num; i++) {
3976                         (void)rte_memcpy(req_list[i].mac_addr,
3977                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
3978                         req_list[i].vlan_tag =
3979                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
3980
3981                         switch (filter[num + i].filter_type) {
3982                         case RTE_MAC_PERFECT_MATCH:
3983                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3984                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3985                                 break;
3986                         case RTE_MACVLAN_PERFECT_MATCH:
3987                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3988                                 break;
3989                         case RTE_MAC_HASH_MATCH:
3990                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
3991                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3992                                 break;
3993                         case RTE_MACVLAN_HASH_MATCH:
3994                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
3995                                 break;
3996                         default:
3997                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
3998                                 ret = I40E_ERR_PARAM;
3999                                 goto DONE;
4000                         }
4001                         req_list[i].flags = rte_cpu_to_le_16(flags);
4002                 }
4003
4004                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
4005                                                 actual_num, NULL);
4006                 if (ret != I40E_SUCCESS) {
4007                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
4008                         goto DONE;
4009                 }
4010                 num += actual_num;
4011         } while (num < total);
4012
4013 DONE:
4014         rte_free(req_list);
4015         return ret;
4016 }
4017
4018 /* Find out specific MAC filter */
4019 static struct i40e_mac_filter *
4020 i40e_find_mac_filter(struct i40e_vsi *vsi,
4021                          struct ether_addr *macaddr)
4022 {
4023         struct i40e_mac_filter *f;
4024
4025         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4026                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4027                         return f;
4028         }
4029
4030         return NULL;
4031 }
4032
4033 static bool
4034 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4035                          uint16_t vlan_id)
4036 {
4037         uint32_t vid_idx, vid_bit;
4038
4039         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
4040         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
4041
4042         if (vsi->vfta[vid_idx] & vid_bit)
4043                 return 1;
4044         else
4045                 return 0;
4046 }
4047
4048 static void
4049 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4050                          uint16_t vlan_id, bool on)
4051 {
4052         uint32_t vid_idx, vid_bit;
4053
4054 #define UINT32_BIT_MASK      0x1F
4055 #define VALID_VLAN_BIT_MASK  0xFFF
4056         /* VFTA is 32-bits size array, each element contains 32 vlan bits, Find the
4057          *  element first, then find the bits it belongs to
4058          */
4059         vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
4060                   sizeof(uint32_t));
4061         vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
4062
4063         if (on)
4064                 vsi->vfta[vid_idx] |= vid_bit;
4065         else
4066                 vsi->vfta[vid_idx] &= ~vid_bit;
4067 }
4068
4069 /**
4070  * Find all vlan options for specific mac addr,
4071  * return with actual vlan found.
4072  */
4073 static inline int
4074 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4075                            struct i40e_macvlan_filter *mv_f,
4076                            int num, struct ether_addr *addr)
4077 {
4078         int i;
4079         uint32_t j, k;
4080
4081         /**
4082          * Not to use i40e_find_vlan_filter to decrease the loop time,
4083          * although the code looks complex.
4084           */
4085         if (num < vsi->vlan_num)
4086                 return I40E_ERR_PARAM;
4087
4088         i = 0;
4089         for (j = 0; j < I40E_VFTA_SIZE; j++) {
4090                 if (vsi->vfta[j]) {
4091                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4092                                 if (vsi->vfta[j] & (1 << k)) {
4093                                         if (i > num - 1) {
4094                                                 PMD_DRV_LOG(ERR, "vlan number "
4095                                                             "not match");
4096                                                 return I40E_ERR_PARAM;
4097                                         }
4098                                         (void)rte_memcpy(&mv_f[i].macaddr,
4099                                                         addr, ETH_ADDR_LEN);
4100                                         mv_f[i].vlan_id =
4101                                                 j * I40E_UINT32_BIT_SIZE + k;
4102                                         i++;
4103                                 }
4104                         }
4105                 }
4106         }
4107         return I40E_SUCCESS;
4108 }
4109
4110 static inline int
4111 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4112                            struct i40e_macvlan_filter *mv_f,
4113                            int num,
4114                            uint16_t vlan)
4115 {
4116         int i = 0;
4117         struct i40e_mac_filter *f;
4118
4119         if (num < vsi->mac_num)
4120                 return I40E_ERR_PARAM;
4121
4122         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4123                 if (i > num - 1) {
4124                         PMD_DRV_LOG(ERR, "buffer number not match");
4125                         return I40E_ERR_PARAM;
4126                 }
4127                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4128                                 ETH_ADDR_LEN);
4129                 mv_f[i].vlan_id = vlan;
4130                 mv_f[i].filter_type = f->mac_info.filter_type;
4131                 i++;
4132         }
4133
4134         return I40E_SUCCESS;
4135 }
4136
/* Remove every MAC/VLAN filter configured on the VSI in one pass.
 * Builds a flat array of (mac, vlan) pairs — one entry per MAC when no
 * vlan is set, otherwise mac_num * vlan_num entries — and hands it to
 * i40e_remove_macvlan_filters(). The software MAC list and VFTA shadow
 * are left untouched; only the hardware filters are removed.
 */
static int
i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
{
	int i, num;
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int ret = I40E_SUCCESS;

	if (vsi == NULL || vsi->mac_num == 0)
		return I40E_ERR_PARAM;

	/* Case that no vlan is set */
	if (vsi->vlan_num == 0)
		num = vsi->mac_num;
	else
		num = vsi->mac_num * vsi->vlan_num;

	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	i = 0;
	if (vsi->vlan_num == 0) {
		/* No vlans: one entry per MAC, tagged with vlan 0 */
		TAILQ_FOREACH(f, &vsi->mac_list, next) {
			(void)rte_memcpy(&mv_f[i].macaddr,
				&f->mac_info.mac_addr, ETH_ADDR_LEN);
			mv_f[i].vlan_id = 0;
			i++;
		}
	} else {
		/* Expand each MAC into one entry per configured vlan;
		 * each call fills exactly vsi->vlan_num slots of mv_f.
		 */
		TAILQ_FOREACH(f, &vsi->mac_list, next) {
			ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
					vsi->vlan_num, &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS)
				goto DONE;
			i += vsi->vlan_num;
		}
	}

	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
DONE:
	rte_free(mv_f);

	return ret;
}
4184
4185 int
4186 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4187 {
4188         struct i40e_macvlan_filter *mv_f;
4189         int mac_num;
4190         int ret = I40E_SUCCESS;
4191
4192         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4193                 return I40E_ERR_PARAM;
4194
4195         /* If it's already set, just return */
4196         if (i40e_find_vlan_filter(vsi,vlan))
4197                 return I40E_SUCCESS;
4198
4199         mac_num = vsi->mac_num;
4200
4201         if (mac_num == 0) {
4202                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4203                 return I40E_ERR_PARAM;
4204         }
4205
4206         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4207
4208         if (mv_f == NULL) {
4209                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4210                 return I40E_ERR_NO_MEMORY;
4211         }
4212
4213         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4214
4215         if (ret != I40E_SUCCESS)
4216                 goto DONE;
4217
4218         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4219
4220         if (ret != I40E_SUCCESS)
4221                 goto DONE;
4222
4223         i40e_set_vlan_filter(vsi, vlan, 1);
4224
4225         vsi->vlan_num++;
4226         ret = I40E_SUCCESS;
4227 DONE:
4228         rte_free(mv_f);
4229         return ret;
4230 }
4231
/* Remove a vlan filter from the VSI: delete every (mac, vlan) pair from
 * hardware, and — if this was the last vlan — re-add all MACs paired with
 * vlan 0 so untagged traffic keeps flowing. Finally clear the vlan from
 * the software VFTA shadow and decrement vlan_num.
 */
int
i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If can't find it, just return */
	if (!i40e_find_vlan_filter(vsi, vlan))
		return I40E_ERR_PARAM;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	/* One scratch entry per MAC address, reused for both passes below */
	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	/* This is last vlan to remove, replace all mac filter with vlan 0 */
	if (vsi->vlan_num == 1) {
		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
		if (ret != I40E_SUCCESS)
			goto DONE;

		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	/* Hardware updated; now drop the vlan from the software shadow */
	i40e_set_vlan_filter(vsi, vlan, 0);

	vsi->vlan_num--;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
4293
4294 int
4295 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4296 {
4297         struct i40e_mac_filter *f;
4298         struct i40e_macvlan_filter *mv_f;
4299         int i, vlan_num = 0;
4300         int ret = I40E_SUCCESS;
4301
4302         /* If it's add and we've config it, return */
4303         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4304         if (f != NULL)
4305                 return I40E_SUCCESS;
4306         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4307                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4308
4309                 /**
4310                  * If vlan_num is 0, that's the first time to add mac,
4311                  * set mask for vlan_id 0.
4312                  */
4313                 if (vsi->vlan_num == 0) {
4314                         i40e_set_vlan_filter(vsi, 0, 1);
4315                         vsi->vlan_num = 1;
4316                 }
4317                 vlan_num = vsi->vlan_num;
4318         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4319                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4320                 vlan_num = 1;
4321
4322         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4323         if (mv_f == NULL) {
4324                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4325                 return I40E_ERR_NO_MEMORY;
4326         }
4327
4328         for (i = 0; i < vlan_num; i++) {
4329                 mv_f[i].filter_type = mac_filter->filter_type;
4330                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4331                                 ETH_ADDR_LEN);
4332         }
4333
4334         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4335                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4336                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4337                                         &mac_filter->mac_addr);
4338                 if (ret != I40E_SUCCESS)
4339                         goto DONE;
4340         }
4341
4342         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4343         if (ret != I40E_SUCCESS)
4344                 goto DONE;
4345
4346         /* Add the mac addr into mac list */
4347         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4348         if (f == NULL) {
4349                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4350                 ret = I40E_ERR_NO_MEMORY;
4351                 goto DONE;
4352         }
4353         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4354                         ETH_ADDR_LEN);
4355         f->mac_info.filter_type = mac_filter->filter_type;
4356         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4357         vsi->mac_num++;
4358
4359         ret = I40E_SUCCESS;
4360 DONE:
4361         rte_free(mv_f);
4362
4363         return ret;
4364 }
4365
4366 int
4367 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4368 {
4369         struct i40e_mac_filter *f;
4370         struct i40e_macvlan_filter *mv_f;
4371         int i, vlan_num;
4372         enum rte_mac_filter_type filter_type;
4373         int ret = I40E_SUCCESS;
4374
4375         /* Can't find it, return an error */
4376         f = i40e_find_mac_filter(vsi, addr);
4377         if (f == NULL)
4378                 return I40E_ERR_PARAM;
4379
4380         vlan_num = vsi->vlan_num;
4381         filter_type = f->mac_info.filter_type;
4382         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4383                 filter_type == RTE_MACVLAN_HASH_MATCH) {
4384                 if (vlan_num == 0) {
4385                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
4386                         return I40E_ERR_PARAM;
4387                 }
4388         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4389                         filter_type == RTE_MAC_HASH_MATCH)
4390                 vlan_num = 1;
4391
4392         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4393         if (mv_f == NULL) {
4394                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4395                 return I40E_ERR_NO_MEMORY;
4396         }
4397
4398         for (i = 0; i < vlan_num; i++) {
4399                 mv_f[i].filter_type = filter_type;
4400                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4401                                 ETH_ADDR_LEN);
4402         }
4403         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4404                         filter_type == RTE_MACVLAN_HASH_MATCH) {
4405                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4406                 if (ret != I40E_SUCCESS)
4407                         goto DONE;
4408         }
4409
4410         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4411         if (ret != I40E_SUCCESS)
4412                 goto DONE;
4413
4414         /* Remove the mac addr into mac list */
4415         TAILQ_REMOVE(&vsi->mac_list, f, next);
4416         rte_free(f);
4417         vsi->mac_num--;
4418
4419         ret = I40E_SUCCESS;
4420 DONE:
4421         rte_free(mv_f);
4422         return ret;
4423 }
4424
4425 /* Configure hash enable flags for RSS */
4426 uint64_t
4427 i40e_config_hena(uint64_t flags)
4428 {
4429         uint64_t hena = 0;
4430
4431         if (!flags)
4432                 return hena;
4433
4434         if (flags & ETH_RSS_NONF_IPV4_UDP)
4435                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4436         if (flags & ETH_RSS_NONF_IPV4_TCP)
4437                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4438         if (flags & ETH_RSS_NONF_IPV4_SCTP)
4439                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4440         if (flags & ETH_RSS_NONF_IPV4_OTHER)
4441                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4442         if (flags & ETH_RSS_FRAG_IPV4)
4443                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4444         if (flags & ETH_RSS_NONF_IPV6_UDP)
4445                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4446         if (flags & ETH_RSS_NONF_IPV6_TCP)
4447                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4448         if (flags & ETH_RSS_NONF_IPV6_SCTP)
4449                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4450         if (flags & ETH_RSS_NONF_IPV6_OTHER)
4451                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4452         if (flags & ETH_RSS_FRAG_IPV6)
4453                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4454         if (flags & ETH_RSS_L2_PAYLOAD)
4455                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4456
4457         return hena;
4458 }
4459
4460 /* Parse the hash enable flags */
4461 uint64_t
4462 i40e_parse_hena(uint64_t flags)
4463 {
4464         uint64_t rss_hf = 0;
4465
4466         if (!flags)
4467                 return rss_hf;
4468
4469         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4470                 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
4471         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4472                 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
4473         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4474                 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
4475         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4476                 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
4477         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4478                 rss_hf |= ETH_RSS_FRAG_IPV4;
4479         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4480                 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
4481         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4482                 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
4483         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4484                 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
4485         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4486                 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
4487         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4488                 rss_hf |= ETH_RSS_FRAG_IPV6;
4489         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4490                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4491
4492         return rss_hf;
4493 }
4494
4495 /* Disable RSS */
4496 static void
4497 i40e_pf_disable_rss(struct i40e_pf *pf)
4498 {
4499         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4500         uint64_t hena;
4501
4502         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4503         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4504         hena &= ~I40E_RSS_HENA_ALL;
4505         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4506         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4507         I40E_WRITE_FLUSH(hw);
4508 }
4509
4510 static int
4511 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4512 {
4513         uint32_t *hash_key;
4514         uint8_t hash_key_len;
4515         uint64_t rss_hf;
4516         uint16_t i;
4517         uint64_t hena;
4518
4519         hash_key = (uint32_t *)(rss_conf->rss_key);
4520         hash_key_len = rss_conf->rss_key_len;
4521         if (hash_key != NULL && hash_key_len >=
4522                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4523                 /* Fill in RSS hash key */
4524                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4525                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4526         }
4527
4528         rss_hf = rss_conf->rss_hf;
4529         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4530         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4531         hena &= ~I40E_RSS_HENA_ALL;
4532         hena |= i40e_config_hena(rss_hf);
4533         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4534         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4535         I40E_WRITE_FLUSH(hw);
4536
4537         return 0;
4538 }
4539
4540 static int
4541 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4542                          struct rte_eth_rss_conf *rss_conf)
4543 {
4544         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4545         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4546         uint64_t hena;
4547
4548         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4549         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4550         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4551                 if (rss_hf != 0) /* Enable RSS */
4552                         return -EINVAL;
4553                 return 0; /* Nothing to do */
4554         }
4555         /* RSS enabled */
4556         if (rss_hf == 0) /* Disable RSS */
4557                 return -EINVAL;
4558
4559         return i40e_hw_rss_hash_set(hw, rss_conf);
4560 }
4561
4562 static int
4563 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4564                            struct rte_eth_rss_conf *rss_conf)
4565 {
4566         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4567         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4568         uint64_t hena;
4569         uint16_t i;
4570
4571         if (hash_key != NULL) {
4572                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4573                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4574                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4575         }
4576         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4577         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4578         rss_conf->rss_hf = i40e_parse_hena(hena);
4579
4580         return 0;
4581 }
4582
4583 static int
4584 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4585 {
4586         switch (filter_type) {
4587         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4588                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4589                 break;
4590         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4591                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4592                 break;
4593         case RTE_TUNNEL_FILTER_IMAC_TENID:
4594                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4595                 break;
4596         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4597                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4598                 break;
4599         case ETH_TUNNEL_FILTER_IMAC:
4600                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4601                 break;
4602         default:
4603                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4604                 return -EINVAL;
4605         }
4606
4607         return 0;
4608 }
4609
/**
 * Add or remove a cloud (tunnel) filter on the PF main VSI via the
 * admin queue.
 *
 * Builds an AQ cloud-filter element from the user-supplied description
 * (outer/inner MAC, inner VLAN, tunnel IP, tunnel type, tenant id,
 * destination queue) and submits it with a single add/remove command.
 *
 * @param pf             PF instance; filter applies to pf->main_vsi
 * @param tunnel_filter  user-supplied filter description
 * @param add            non-zero to add the filter, zero to remove it
 * @return 0 on success, -EINVAL on alloc failure / unsupported tunnel
 *         type or filter type, otherwise the AQ call's status
 */
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			struct rte_eth_tunnel_filter_conf *tunnel_filter,
			uint8_t add)
{
	uint16_t ip_type;
	uint8_t tun_type = 0;
	int val, ret = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;

	/* The AQ element is heap-allocated and freed on every exit path */
	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
		0);

	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -EINVAL;
	}
	pfilter = cld_filter;

	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
			sizeof(struct ether_addr));
	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
			sizeof(struct ether_addr));

	pfilter->inner_vlan = tunnel_filter->inner_vlan;
	/* Copy the tunnel endpoint address into the matching v4/v6 slot */
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
		break;
	default:
		/* Other tunnel types is not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	/* Resolve the ethdev filter type into AQ cloud-filter flags */
	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
						&pfilter->flags);
	if (val < 0) {
		rte_free(cld_filter);
		return -EINVAL;
	}

	/* Combine filter-type, IP-version and tunnel-type flag bits */
	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	pfilter->tenant_id = tunnel_filter->tenant_id;
	pfilter->queue_number = tunnel_filter->queue_id;

	if (add)
		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						cld_filter, 1);

	rte_free(cld_filter);
	return ret;
}
4684
4685 static int
4686 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4687 {
4688         uint8_t i;
4689
4690         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4691                 if (pf->vxlan_ports[i] == port)
4692                         return i;
4693         }
4694
4695         return -1;
4696 }
4697
4698 static int
4699 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4700 {
4701         int  idx, ret;
4702         uint8_t filter_idx;
4703         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4704
4705         idx = i40e_get_vxlan_port_idx(pf, port);
4706
4707         /* Check if port already exists */
4708         if (idx >= 0) {
4709                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4710                 return -EINVAL;
4711         }
4712
4713         /* Now check if there is space to add the new port */
4714         idx = i40e_get_vxlan_port_idx(pf, 0);
4715         if (idx < 0) {
4716                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
4717                         "not adding port %d", port);
4718                 return -ENOSPC;
4719         }
4720
4721         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4722                                         &filter_idx, NULL);
4723         if (ret < 0) {
4724                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4725                 return -1;
4726         }
4727
4728         PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
4729                          port,  filter_idx);
4730
4731         /* New port: add it and mark its index in the bitmap */
4732         pf->vxlan_ports[idx] = port;
4733         pf->vxlan_bitmap |= (1 << idx);
4734
4735         if (!(pf->flags & I40E_FLAG_VXLAN))
4736                 pf->flags |= I40E_FLAG_VXLAN;
4737
4738         return 0;
4739 }
4740
4741 static int
4742 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
4743 {
4744         int idx;
4745         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4746
4747         if (!(pf->flags & I40E_FLAG_VXLAN)) {
4748                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
4749                 return -EINVAL;
4750         }
4751
4752         idx = i40e_get_vxlan_port_idx(pf, port);
4753
4754         if (idx < 0) {
4755                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
4756                 return -EINVAL;
4757         }
4758
4759         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
4760                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
4761                 return -1;
4762         }
4763
4764         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
4765                         port, idx);
4766
4767         pf->vxlan_ports[idx] = 0;
4768         pf->vxlan_bitmap &= ~(1 << idx);
4769
4770         if (!pf->vxlan_bitmap)
4771                 pf->flags &= ~I40E_FLAG_VXLAN;
4772
4773         return 0;
4774 }
4775
4776 /* Add UDP tunneling port */
4777 static int
4778 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4779                         struct rte_eth_udp_tunnel *udp_tunnel)
4780 {
4781         int ret = 0;
4782         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4783
4784         if (udp_tunnel == NULL)
4785                 return -EINVAL;
4786
4787         switch (udp_tunnel->prot_type) {
4788         case RTE_TUNNEL_TYPE_VXLAN:
4789                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
4790                 break;
4791
4792         case RTE_TUNNEL_TYPE_GENEVE:
4793         case RTE_TUNNEL_TYPE_TEREDO:
4794                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4795                 ret = -1;
4796                 break;
4797
4798         default:
4799                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4800                 ret = -1;
4801                 break;
4802         }
4803
4804         return ret;
4805 }
4806
4807 /* Remove UDP tunneling port */
4808 static int
4809 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
4810                         struct rte_eth_udp_tunnel *udp_tunnel)
4811 {
4812         int ret = 0;
4813         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4814
4815         if (udp_tunnel == NULL)
4816                 return -EINVAL;
4817
4818         switch (udp_tunnel->prot_type) {
4819         case RTE_TUNNEL_TYPE_VXLAN:
4820                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
4821                 break;
4822         case RTE_TUNNEL_TYPE_GENEVE:
4823         case RTE_TUNNEL_TYPE_TEREDO:
4824                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4825                 ret = -1;
4826                 break;
4827         default:
4828                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4829                 ret = -1;
4830                 break;
4831         }
4832
4833         return ret;
4834 }
4835
4836 /* Calculate the maximum number of contiguous PF queues that are configured */
4837 static int
4838 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
4839 {
4840         struct rte_eth_dev_data *data = pf->dev_data;
4841         int i, num;
4842         struct i40e_rx_queue *rxq;
4843
4844         num = 0;
4845         for (i = 0; i < pf->lan_nb_qps; i++) {
4846                 rxq = data->rx_queues[i];
4847                 if (rxq && rxq->q_set)
4848                         num++;
4849                 else
4850                         break;
4851         }
4852
4853         return num;
4854 }
4855
/* Configure RSS */
/**
 * Fill the RSS lookup table with a round-robin spread over the
 * configured PF queues, then apply the user's hash configuration
 * (falling back to a random default key when none is given).
 *
 * @param pf  PF to configure
 * @return 0 on success (including RSS-disabled case), -ENOTSUP when
 *         no PF queue is configured, or i40e_hw_rss_hash_set()'s code
 */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	uint16_t j, num;

	/*
	 * If both VMDQ and RSS enabled, not all of PF queues are configured.
	 * It's necessary to calulate the actual PF queues that are configured.
	 */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		num = i40e_pf_calc_configured_queues_num(pf);
		num = i40e_align_floor(num);
	} else
		num = i40e_align_floor(pf->dev_data->nb_rx_queues);

	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
			num);

	if (num == 0) {
		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	/* Pack 4 LUT entries (one byte each, queue index modulo num) into
	 * each 32-bit LUT register; a register is written every 4th entry.
	 */
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		/* No supported hash type requested: turn RSS off */
		i40e_pf_disable_rss(pf);
		return 0;
	}
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		/* rss_key_default is a file-scope buffer defined elsewhere
		 * in this file; it is filled with fresh random words here.
		 */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	return i40e_hw_rss_hash_set(hw, &rss_conf);
}
4909
4910 static int
4911 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
4912                         struct rte_eth_tunnel_filter_conf *filter)
4913 {
4914         if (pf == NULL || filter == NULL) {
4915                 PMD_DRV_LOG(ERR, "Invalid parameter");
4916                 return -EINVAL;
4917         }
4918
4919         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
4920                 PMD_DRV_LOG(ERR, "Invalid queue ID");
4921                 return -EINVAL;
4922         }
4923
4924         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
4925                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
4926                 return -EINVAL;
4927         }
4928
4929         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
4930                 (is_zero_ether_addr(filter->outer_mac))) {
4931                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
4932                 return -EINVAL;
4933         }
4934
4935         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
4936                 (is_zero_ether_addr(filter->inner_mac))) {
4937                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
4938                 return -EINVAL;
4939         }
4940
4941         return 0;
4942 }
4943
4944 static int
4945 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4946                         void *arg)
4947 {
4948         struct rte_eth_tunnel_filter_conf *filter;
4949         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4950         int ret = I40E_SUCCESS;
4951
4952         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
4953
4954         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
4955                 return I40E_ERR_PARAM;
4956
4957         switch (filter_op) {
4958         case RTE_ETH_FILTER_NOP:
4959                 if (!(pf->flags & I40E_FLAG_VXLAN))
4960                         ret = I40E_NOT_SUPPORTED;
4961         case RTE_ETH_FILTER_ADD:
4962                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
4963                 break;
4964         case RTE_ETH_FILTER_DELETE:
4965                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
4966                 break;
4967         default:
4968                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4969                 ret = I40E_ERR_PARAM;
4970                 break;
4971         }
4972
4973         return ret;
4974 }
4975
4976 static int
4977 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4978 {
4979         int ret = 0;
4980         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
4981
4982         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
4983                 PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
4984                 return -ENOTSUP;
4985         }
4986
4987         /* RSS setup */
4988         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
4989                 ret = i40e_pf_config_rss(pf);
4990         else
4991                 i40e_pf_disable_rss(pf);
4992
4993         return ret;
4994 }
4995
4996 static int
4997 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
4998                      enum rte_filter_type filter_type,
4999                      enum rte_filter_op filter_op,
5000                      void *arg)
5001 {
5002         int ret = 0;
5003
5004         if (dev == NULL)
5005                 return -EINVAL;
5006
5007         switch (filter_type) {
5008         case RTE_ETH_FILTER_MACVLAN:
5009                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
5010                 break;
5011         case RTE_ETH_FILTER_TUNNEL:
5012                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
5013                 break;
5014         default:
5015                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5016                                                         filter_type);
5017                 ret = -EINVAL;
5018                 break;
5019         }
5020
5021         return ret;
5022 }