20c99a4c0bcea8a44962825fcf1667a1a2ec5a0b
[dpdk.git] / lib / librte_pmd_i40e / i40e_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42
43 #include <rte_string_fns.h>
44 #include <rte_pci.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_dev.h>
51 #include <rte_eth_ctrl.h>
52
53 #include "i40e_logs.h"
54 #include "i40e/i40e_register_x710_int.h"
55 #include "i40e/i40e_prototype.h"
56 #include "i40e/i40e_adminq_cmd.h"
57 #include "i40e/i40e_type.h"
58 #include "i40e_ethdev.h"
59 #include "i40e_rxtx.h"
60 #include "i40e_pf.h"
61
62 #define I40E_DEFAULT_RX_FREE_THRESH  32
63 #define I40E_DEFAULT_RX_PTHRESH      8
64 #define I40E_DEFAULT_RX_HTHRESH      8
65 #define I40E_DEFAULT_RX_WTHRESH      0
66
67 #define I40E_DEFAULT_TX_FREE_THRESH  32
68 #define I40E_DEFAULT_TX_PTHRESH      32
69 #define I40E_DEFAULT_TX_HTHRESH      0
70 #define I40E_DEFAULT_TX_WTHRESH      0
71 #define I40E_DEFAULT_TX_RSBIT_THRESH 32
72
/* Maximum number of MAC addresses */
74 #define I40E_NUM_MACADDR_MAX       64
75 #define I40E_CLEAR_PXE_WAIT_MS     200
76
/* Maximum number of capability elements */
78 #define I40E_MAX_CAP_ELE_NUM       128
79
/* Wait count and interval */
81 #define I40E_CHK_Q_ENA_COUNT       1000
82 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
83
/* Maximum number of VSIs */
85 #define I40E_MAX_NUM_VSIS          (384UL)
86
87 /* Bit shift and mask */
88 #define I40E_16_BIT_SHIFT 16
89 #define I40E_16_BIT_MASK  0xFFFF
90 #define I40E_32_BIT_SHIFT 32
91 #define I40E_32_BIT_MASK  0xFFFFFFFF
92 #define I40E_48_BIT_SHIFT 48
93 #define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
94
/* Default queue interrupt throttling time in microseconds */
96 #define I40E_ITR_INDEX_DEFAULT          0
97 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
98 #define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
99
100 #define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
101
102 static int eth_i40e_dev_init(\
103                         __attribute__((unused)) struct eth_driver *eth_drv,
104                         struct rte_eth_dev *eth_dev);
105 static int i40e_dev_configure(struct rte_eth_dev *dev);
106 static int i40e_dev_start(struct rte_eth_dev *dev);
107 static void i40e_dev_stop(struct rte_eth_dev *dev);
108 static void i40e_dev_close(struct rte_eth_dev *dev);
109 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
110 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
111 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
112 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
113 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
114 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
115 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
116                                struct rte_eth_stats *stats);
117 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
118 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
119                                             uint16_t queue_id,
120                                             uint8_t stat_idx,
121                                             uint8_t is_rx);
122 static void i40e_dev_info_get(struct rte_eth_dev *dev,
123                               struct rte_eth_dev_info *dev_info);
124 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
125                                 uint16_t vlan_id,
126                                 int on);
127 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
128 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
129 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
130                                       uint16_t queue,
131                                       int on);
132 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
133 static int i40e_dev_led_on(struct rte_eth_dev *dev);
134 static int i40e_dev_led_off(struct rte_eth_dev *dev);
135 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
136                               struct rte_eth_fc_conf *fc_conf);
137 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
138                                        struct rte_eth_pfc_conf *pfc_conf);
139 static void i40e_macaddr_add(struct rte_eth_dev *dev,
140                           struct ether_addr *mac_addr,
141                           uint32_t index,
142                           uint32_t pool);
143 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
144 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
145                                     struct rte_eth_rss_reta *reta_conf);
146 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
147                                    struct rte_eth_rss_reta *reta_conf);
148
149 static int i40e_get_cap(struct i40e_hw *hw);
150 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
151 static int i40e_pf_setup(struct i40e_pf *pf);
152 static int i40e_vsi_init(struct i40e_vsi *vsi);
153 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
154                 bool offset_loaded, uint64_t *offset, uint64_t *stat);
155 static void i40e_stat_update_48(struct i40e_hw *hw,
156                                uint32_t hireg,
157                                uint32_t loreg,
158                                bool offset_loaded,
159                                uint64_t *offset,
160                                uint64_t *stat);
161 static void i40e_pf_config_irq0(struct i40e_hw *hw);
162 static void i40e_dev_interrupt_handler(
163                 __rte_unused struct rte_intr_handle *handle, void *param);
164 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
165                                 uint32_t base, uint32_t num);
166 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
167 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
168                         uint32_t base);
169 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
170                         uint16_t num);
171 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
172 static int i40e_veb_release(struct i40e_veb *veb);
173 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
174                                                 struct i40e_vsi *vsi);
175 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
176 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
177 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
178                                              struct i40e_macvlan_filter *mv_f,
179                                              int num,
180                                              struct ether_addr *addr);
181 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
182                                              struct i40e_macvlan_filter *mv_f,
183                                              int num,
184                                              uint16_t vlan);
185 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
186 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
187                                     struct rte_eth_rss_conf *rss_conf);
188 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
189                                       struct rte_eth_rss_conf *rss_conf);
190 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
191                                 struct rte_eth_udp_tunnel *udp_tunnel);
192 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
193                                 struct rte_eth_udp_tunnel *udp_tunnel);
194 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
195                                 enum rte_filter_type filter_type,
196                                 enum rte_filter_op filter_op,
197                                 void *arg);
198
/* Default hash key buffer for RSS; filled at runtime before use */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];

/* Supported PCI IDs, expanded from the I40E entries in rte_pci_dev_ids.h */
static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};
207
/*
 * Ethdev callback table: wires the generic rte_ethdev API entry points
 * to their i40e-specific implementations in this file and i40e_rxtx.c.
 */
static struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.stats_reset                  = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get                = i40e_dev_info_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_add               = i40e_dev_udp_tunnel_add,
	.udp_tunnel_del               = i40e_dev_udp_tunnel_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
};
253
/*
 * PCI ethernet driver descriptor for the i40e PF PMD.
 * eth_i40e_dev_init is invoked once per matching PCI device at probe time.
 */
static struct eth_driver rte_i40e_pmd = {
	{
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};
263
/*
 * Round n down to the nearest power of two.
 * If n is already a power of two it is returned unchanged.
 * Only meaningful for positive inputs (0 maps to 0).
 */
static inline int
i40e_prev_power_of_2(int n)
{
	int smear = n - 1;

	/* Propagate the highest set bit into every lower position. */
	smear |= smear >> 1;
	smear |= smear >> 2;
	smear |= smear >> 4;
	smear |= smear >> 8;
	smear |= smear >> 16;

	/* n was a power of two iff n - 1 was already a run of low ones. */
	if (smear == n - 1)
		return n;

	/* smear is next_pow2(n) - 1; halve and bump to get prev_pow2(n). */
	return (smear >> 1) + 1;
}
281
282 static inline int
283 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
284                                      struct rte_eth_link *link)
285 {
286         struct rte_eth_link *dst = link;
287         struct rte_eth_link *src = &(dev->data->dev_link);
288
289         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
290                                         *(uint64_t *)src) == 0)
291                 return -1;
292
293         return 0;
294 }
295
296 static inline int
297 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
298                                       struct rte_eth_link *link)
299 {
300         struct rte_eth_link *dst = &(dev->data->dev_link);
301         struct rte_eth_link *src = link;
302
303         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
304                                         *(uint64_t *)src) == 0)
305                 return -1;
306
307         return 0;
308 }
309
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI I40E devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
                  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}
324
/* EAL driver descriptor; .init runs at rte_eal_init time */
static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

/* Hook the driver into the EAL's constructor-time registration list */
PMD_REGISTER_DRIVER(rte_i40e_driver);
331
332 static int
333 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
334                   struct rte_eth_dev *dev)
335 {
336         struct rte_pci_device *pci_dev;
337         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
338         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
339         struct i40e_vsi *vsi;
340         int ret;
341         uint32_t len;
342         uint8_t aq_fail = 0;
343
344         PMD_INIT_FUNC_TRACE();
345
346         dev->dev_ops = &i40e_eth_dev_ops;
347         dev->rx_pkt_burst = i40e_recv_pkts;
348         dev->tx_pkt_burst = i40e_xmit_pkts;
349
350         /* for secondary processes, we don't initialise any further as primary
351          * has already done this work. Only check we don't need a different
352          * RX function */
353         if (rte_eal_process_type() != RTE_PROC_PRIMARY){
354                 if (dev->data->scattered_rx)
355                         dev->rx_pkt_burst = i40e_recv_scattered_pkts;
356                 return 0;
357         }
358         pci_dev = dev->pci_dev;
359         pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
360         pf->adapter->eth_dev = dev;
361         pf->dev_data = dev->data;
362
363         hw->back = I40E_PF_TO_ADAPTER(pf);
364         hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
365         if (!hw->hw_addr) {
366                 PMD_INIT_LOG(ERR, "Hardware is not available, "
367                              "as address is NULL");
368                 return -ENODEV;
369         }
370
371         hw->vendor_id = pci_dev->id.vendor_id;
372         hw->device_id = pci_dev->id.device_id;
373         hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
374         hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
375         hw->bus.device = pci_dev->addr.devid;
376         hw->bus.func = pci_dev->addr.function;
377
378         /* Make sure all is clean before doing PF reset */
379         i40e_clear_hw(hw);
380
381         /* Reset here to make sure all is clean for each PF */
382         ret = i40e_pf_reset(hw);
383         if (ret) {
384                 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
385                 return ret;
386         }
387
388         /* Initialize the shared code (base driver) */
389         ret = i40e_init_shared_code(hw);
390         if (ret) {
391                 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
392                 return ret;
393         }
394
395         /* Initialize the parameters for adminq */
396         i40e_init_adminq_parameter(hw);
397         ret = i40e_init_adminq(hw);
398         if (ret != I40E_SUCCESS) {
399                 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
400                 return -EIO;
401         }
402         PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
403                      hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
404                      hw->aq.api_maj_ver, hw->aq.api_min_ver,
405                      ((hw->nvm.version >> 12) & 0xf),
406                      ((hw->nvm.version >> 4) & 0xff),
407                      (hw->nvm.version & 0xf), hw->nvm.eetrack);
408
409         /* Disable LLDP */
410         ret = i40e_aq_stop_lldp(hw, true, NULL);
411         if (ret != I40E_SUCCESS) /* Its failure can be ignored */
412                 PMD_INIT_LOG(INFO, "Failed to stop lldp");
413
414         /* Clear PXE mode */
415         i40e_clear_pxe_mode(hw);
416
417         /* Get hw capabilities */
418         ret = i40e_get_cap(hw);
419         if (ret != I40E_SUCCESS) {
420                 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
421                 goto err_get_capabilities;
422         }
423
424         /* Initialize parameters for PF */
425         ret = i40e_pf_parameter_init(dev);
426         if (ret != 0) {
427                 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
428                 goto err_parameter_init;
429         }
430
431         /* Initialize the queue management */
432         ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
433         if (ret < 0) {
434                 PMD_INIT_LOG(ERR, "Failed to init queue pool");
435                 goto err_qp_pool_init;
436         }
437         ret = i40e_res_pool_init(&pf->msix_pool, 1,
438                                 hw->func_caps.num_msix_vectors - 1);
439         if (ret < 0) {
440                 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
441                 goto err_msix_pool_init;
442         }
443
444         /* Initialize lan hmc */
445         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
446                                 hw->func_caps.num_rx_qp, 0, 0);
447         if (ret != I40E_SUCCESS) {
448                 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
449                 goto err_init_lan_hmc;
450         }
451
452         /* Configure lan hmc */
453         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
454         if (ret != I40E_SUCCESS) {
455                 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
456                 goto err_configure_lan_hmc;
457         }
458
459         /* Get and check the mac address */
460         i40e_get_mac_addr(hw, hw->mac.addr);
461         if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
462                 PMD_INIT_LOG(ERR, "mac address is not valid");
463                 ret = -EIO;
464                 goto err_get_mac_addr;
465         }
466         /* Copy the permanent MAC address */
467         ether_addr_copy((struct ether_addr *) hw->mac.addr,
468                         (struct ether_addr *) hw->mac.perm_addr);
469
470         /* Disable flow control */
471         hw->fc.requested_mode = I40E_FC_NONE;
472         i40e_set_fc(hw, &aq_fail, TRUE);
473
474         /* PF setup, which includes VSI setup */
475         ret = i40e_pf_setup(pf);
476         if (ret) {
477                 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
478                 goto err_setup_pf_switch;
479         }
480
481         vsi = pf->main_vsi;
482
483         /* Disable double vlan by default */
484         i40e_vsi_config_double_vlan(vsi, FALSE);
485
486         if (!vsi->max_macaddrs)
487                 len = ETHER_ADDR_LEN;
488         else
489                 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
490
491         /* Should be after VSI initialized */
492         dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
493         if (!dev->data->mac_addrs) {
494                 PMD_INIT_LOG(ERR, "Failed to allocated memory "
495                                         "for storing mac address");
496                 goto err_get_mac_addr;
497         }
498         ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
499                                         &dev->data->mac_addrs[0]);
500
501         /* initialize pf host driver to setup SRIOV resource if applicable */
502         i40e_pf_host_init(dev);
503
504         /* register callback func to eal lib */
505         rte_intr_callback_register(&(pci_dev->intr_handle),
506                 i40e_dev_interrupt_handler, (void *)dev);
507
508         /* configure and enable device interrupt */
509         i40e_pf_config_irq0(hw);
510         i40e_pf_enable_irq0(hw);
511
512         /* enable uio intr after callback register */
513         rte_intr_enable(&(pci_dev->intr_handle));
514
515         return 0;
516
517 err_setup_pf_switch:
518         rte_free(pf->main_vsi);
519 err_get_mac_addr:
520 err_configure_lan_hmc:
521         (void)i40e_shutdown_lan_hmc(hw);
522 err_init_lan_hmc:
523         i40e_res_pool_destroy(&pf->msix_pool);
524 err_msix_pool_init:
525         i40e_res_pool_destroy(&pf->qp_pool);
526 err_qp_pool_init:
527 err_parameter_init:
528 err_get_capabilities:
529         (void)i40e_shutdown_adminq(hw);
530
531         return ret;
532 }
533
/*
 * Ethdev dev_configure callback. Currently all that is needed is to
 * (re)apply the VLAN offload/filter settings from dev_conf.
 * Returns 0 on success, negative errno on failure.
 */
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	return i40e_dev_init_vlan(dev);
}
539
/*
 * Unbind all queues of the VSI from their MSIX interrupt vector:
 * clear each queue's interrupt cause control register and reset the
 * vector's queue linked-list head.
 */
void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	/* Disable interrupt causes for every TX/RX queue pair of the VSI */
	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();	/* order each pair of writes before the next */
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		/* PF-owned vector: clear list head and throttling interval.
		 * msix_vect - 1 because vector 0 (misc irq) is not in LNKLSTN.
		 */
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), 0);
	} else {
		uint32_t reg;
		/* VF vector index: num_msix_vectors_vf - 1 per VF (irq0 is
		 * excluded), offset by this VF's position (user_param). */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}
	I40E_WRITE_FLUSH(hw);
}
566
567 static inline uint16_t
568 i40e_calc_itr_interval(int16_t interval)
569 {
570         if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
571                 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
572
573         /* Convert to hardware count, as writing each 1 represents 2 us */
574         return (interval/2);
575 }
576
/*
 * Bind all RX queues of the VSI to its MSIX vector by chaining them in
 * the hardware interrupt linked list, then program the list head and
 * throttling interval for the vector.
 */
void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t interval =
		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
	int i;

	/* TX queues are not part of the chain: clear their cause control */
	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		/* Each queue points at the next one; CAUSE_ENA arms it */
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			(interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			((vsi->base_queue + i + 1) <<
			I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		/* Last queue terminates the list (all-ones next index) */
		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
						(vsi->base_queue <<
				I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						msix_vect - 1), interval);

		/* Disable auto-mask on enabling of all none-zero interrupt */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf needs to minus irq0 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
631
/*
 * Enable the VSI's queue interrupt vector: set INTENA, clear any
 * pending PBA bit and program the throttling interval on ITR index 0.
 */
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(\
			RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
					I40E_PFINT_DYN_CTLN_INTENA_MASK |
					I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			(interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}
645
/*
 * Disable the VSI's queue interrupt vector by writing 0 to its dynamic
 * control register (clears INTENA).
 */
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}
653
654 static inline uint8_t
655 i40e_parse_link_speed(uint16_t eth_link_speed)
656 {
657         uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
658
659         switch (eth_link_speed) {
660         case ETH_LINK_SPEED_40G:
661                 link_speed = I40E_LINK_SPEED_40GB;
662                 break;
663         case ETH_LINK_SPEED_20G:
664                 link_speed = I40E_LINK_SPEED_20GB;
665                 break;
666         case ETH_LINK_SPEED_10G:
667                 link_speed = I40E_LINK_SPEED_10GB;
668                 break;
669         case ETH_LINK_SPEED_1000:
670                 link_speed = I40E_LINK_SPEED_1GB;
671                 break;
672         case ETH_LINK_SPEED_100:
673                 link_speed = I40E_LINK_SPEED_100MB;
674                 break;
675         }
676
677         return link_speed;
678 }
679
/*
 * Program the PHY via the admin queue: read the current PHY
 * capabilities, merge the pause/low-power bits into the requested
 * abilities, and apply either the full advertised speed set (when
 * autoneg is enabled) or the forced speed.
 * Returns I40E_SUCCESS on success, -ENOTSUP if either adminq call fails.
 */
static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;
	int ret = -ENOTSUP;

	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
		return ret;

	memset(&phy_conf, 0, sizeof(phy_conf));

	/* bits 0-2 use the values from get_phy_abilities_resp */
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update abilities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = force_speed;

	phy_conf.abilities = abilities;

	/* use get_phy_abilities_resp value for the rest */
	phy_conf.phy_type = phy_ab.phy_type;
	phy_conf.eee_capability = phy_ab.eee_capability;
	phy_conf.eeer = phy_ab.eeer_val;
	phy_conf.low_power_ctrl = phy_ab.d3_lpan;

	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
		    phy_ab.abilities, phy_ab.link_speed);
	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
		    phy_conf.abilities, phy_conf.link_speed);

	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
	if (status)
		return ret;

	return I40E_SUCCESS;
}
731
732 static int
733 i40e_apply_link_speed(struct rte_eth_dev *dev)
734 {
735         uint8_t speed;
736         uint8_t abilities = 0;
737         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
738         struct rte_eth_conf *conf = &dev->data->dev_conf;
739
740         speed = i40e_parse_link_speed(conf->link_speed);
741         abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
742         if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
743                 abilities |= I40E_AQ_PHY_AN_ENABLED;
744         else
745                 abilities |= I40E_AQ_PHY_LINK_ENABLED;
746
747         return i40e_phy_conf_link(hw, abilities, speed);
748 }
749
/*
 * Start the device: validate the configured duplex, initialize and
 * enable the main VSI queues, bind MSIX interrupts, enable broadcast
 * reception and apply the configured link speed.
 * On failure, any queues already enabled are switched off again.
 * Returns I40E_SUCCESS, -EINVAL for a bad duplex setting, or the
 * error code of the failing step.
 */
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	/* Only full duplex (or autoneg) is accepted by this driver */
	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* Initialize VSI */
	ret = i40e_vsi_init(vsi);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init VSI");
		goto err_up;
	}

	/* Map queues with MSIX interrupt, then unmask them */
	i40e_vsi_queues_bind_intr(vsi);
	i40e_vsi_enable_queues_intr(vsi);

	/* Enable all queues which have been configured */
	ret = i40e_vsi_switch_queues(vsi, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets. A failure here is only
	 * logged at INFO level; it does not abort the start sequence.
	 */
	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
	}

	/* Apply link configure */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	return I40E_SUCCESS;

err_up:
	/* Roll back: switch off any queues already enabled */
	i40e_vsi_switch_queues(vsi, FALSE);

	return ret;
}
805
806 static void
807 i40e_dev_stop(struct rte_eth_dev *dev)
808 {
809         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
810         struct i40e_vsi *vsi = pf->main_vsi;
811
812         /* Disable all queues */
813         i40e_vsi_switch_queues(vsi, FALSE);
814
815         /* Set link down */
816         i40e_dev_set_link_down(dev);
817
818         /* un-map queues with interrupt registers */
819         i40e_vsi_disable_queues_intr(vsi);
820         i40e_vsi_queues_unbind_intr(vsi);
821 }
822
/*
 * Close the device: stop traffic, mask interrupts, tear down the HMC,
 * release VSIs/VEBs, shut down the admin queue, free the qp/msix
 * resource pools, and finally trigger a PF software reset to clean
 * any leftover hardware state. The teardown order is significant.
 */
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	/* Quiesce data path before tearing anything down */
	i40e_dev_stop(dev);

	/* Disable interrupt, both at the PF and at the PCI level */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq (firmware notification, then local teardown) */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	/* Free the queue-pair and MSIX vector resource pools */
	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
857
858 static void
859 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
860 {
861         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
862         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
863         struct i40e_vsi *vsi = pf->main_vsi;
864         int status;
865
866         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
867                                                         true, NULL);
868         if (status != I40E_SUCCESS)
869                 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
870
871         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
872                                                         TRUE, NULL);
873         if (status != I40E_SUCCESS)
874                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
875
876 }
877
878 static void
879 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
880 {
881         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
882         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
883         struct i40e_vsi *vsi = pf->main_vsi;
884         int status;
885
886         status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
887                                                         false, NULL);
888         if (status != I40E_SUCCESS)
889                 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
890
891         status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
892                                                         false, NULL);
893         if (status != I40E_SUCCESS)
894                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
895 }
896
897 static void
898 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
899 {
900         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
901         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
902         struct i40e_vsi *vsi = pf->main_vsi;
903         int ret;
904
905         ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
906         if (ret != I40E_SUCCESS)
907                 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
908 }
909
910 static void
911 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
912 {
913         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
914         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
915         struct i40e_vsi *vsi = pf->main_vsi;
916         int ret;
917
918         if (dev->data->promiscuous == 1)
919                 return; /* must remain in all_multicast mode */
920
921         ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
922                                 vsi->seid, FALSE, NULL);
923         if (ret != I40E_SUCCESS)
924                 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
925 }
926
927 /*
928  * Set device link up.
929  */
930 static int
931 i40e_dev_set_link_up(struct rte_eth_dev *dev)
932 {
933         /* re-apply link speed setting */
934         return i40e_apply_link_speed(dev);
935 }
936
937 /*
938  * Set device link down.
939  */
940 static int
941 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
942 {
943         uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
944         uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
945         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
946
947         return i40e_phy_conf_link(hw, abilities, speed);
948 }
949
/*
 * Query the current link status from the admin queue and publish it
 * atomically into dev->data.
 * Returns 0 when the link status changed since the previous call,
 * -1 when it is unchanged (also -1 on AQ failure if the link was
 * already reported down).
 */
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;

	/* Start from zeroed structs so link_status defaults to "down" */
	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		/* AQ failed: publish a down link with placeholder values */
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info");
		goto out;
	}

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;

	/* Link down: speed/duplex are meaningless, publish as-is */
	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		/* Unrecognized speed encoding: fall back to 100M */
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
1010
1011 /* Get all the statistics of a VSI */
1012 void
1013 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1014 {
1015         struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1016         struct i40e_eth_stats *nes = &vsi->eth_stats;
1017         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1018         int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1019
1020         i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1021                             vsi->offset_loaded, &oes->rx_bytes,
1022                             &nes->rx_bytes);
1023         i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1024                             vsi->offset_loaded, &oes->rx_unicast,
1025                             &nes->rx_unicast);
1026         i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1027                             vsi->offset_loaded, &oes->rx_multicast,
1028                             &nes->rx_multicast);
1029         i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1030                             vsi->offset_loaded, &oes->rx_broadcast,
1031                             &nes->rx_broadcast);
1032         i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1033                             &oes->rx_discards, &nes->rx_discards);
1034         /* GLV_REPC not supported */
1035         /* GLV_RMPC not supported */
1036         i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1037                             &oes->rx_unknown_protocol,
1038                             &nes->rx_unknown_protocol);
1039         i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1040                             vsi->offset_loaded, &oes->tx_bytes,
1041                             &nes->tx_bytes);
1042         i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1043                             vsi->offset_loaded, &oes->tx_unicast,
1044                             &nes->tx_unicast);
1045         i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1046                             vsi->offset_loaded, &oes->tx_multicast,
1047                             &nes->tx_multicast);
1048         i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1049                             vsi->offset_loaded,  &oes->tx_broadcast,
1050                             &nes->tx_broadcast);
1051         /* GLV_TDPC not supported */
1052         i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1053                             &oes->tx_errors, &nes->tx_errors);
1054         vsi->offset_loaded = true;
1055
1056         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1057                     vsi->vsi_id);
1058         PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", nes->rx_bytes);
1059         PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", nes->rx_unicast);
1060         PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", nes->rx_multicast);
1061         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", nes->rx_broadcast);
1062         PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", nes->rx_discards);
1063         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1064                     nes->rx_unknown_protocol);
1065         PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", nes->tx_bytes);
1066         PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", nes->tx_unicast);
1067         PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", nes->tx_multicast);
1068         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", nes->tx_broadcast);
1069         PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", nes->tx_discards);
1070         PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", nes->tx_errors);
1071         PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1072                     vsi->vsi_id);
1073 }
1074
1075 /* Get all statistics of a port */
1076 static void
1077 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1078 {
1079         uint32_t i;
1080         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1081         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1082         struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1083         struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1084
1085         /* Get statistics of struct i40e_eth_stats */
1086         i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1087                             I40E_GLPRT_GORCL(hw->port),
1088                             pf->offset_loaded, &os->eth.rx_bytes,
1089                             &ns->eth.rx_bytes);
1090         i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1091                             I40E_GLPRT_UPRCL(hw->port),
1092                             pf->offset_loaded, &os->eth.rx_unicast,
1093                             &ns->eth.rx_unicast);
1094         i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1095                             I40E_GLPRT_MPRCL(hw->port),
1096                             pf->offset_loaded, &os->eth.rx_multicast,
1097                             &ns->eth.rx_multicast);
1098         i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1099                             I40E_GLPRT_BPRCL(hw->port),
1100                             pf->offset_loaded, &os->eth.rx_broadcast,
1101                             &ns->eth.rx_broadcast);
1102         i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1103                             pf->offset_loaded, &os->eth.rx_discards,
1104                             &ns->eth.rx_discards);
1105         /* GLPRT_REPC not supported */
1106         /* GLPRT_RMPC not supported */
1107         i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1108                             pf->offset_loaded,
1109                             &os->eth.rx_unknown_protocol,
1110                             &ns->eth.rx_unknown_protocol);
1111         i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1112                             I40E_GLPRT_GOTCL(hw->port),
1113                             pf->offset_loaded, &os->eth.tx_bytes,
1114                             &ns->eth.tx_bytes);
1115         i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1116                             I40E_GLPRT_UPTCL(hw->port),
1117                             pf->offset_loaded, &os->eth.tx_unicast,
1118                             &ns->eth.tx_unicast);
1119         i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1120                             I40E_GLPRT_MPTCL(hw->port),
1121                             pf->offset_loaded, &os->eth.tx_multicast,
1122                             &ns->eth.tx_multicast);
1123         i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1124                             I40E_GLPRT_BPTCL(hw->port),
1125                             pf->offset_loaded, &os->eth.tx_broadcast,
1126                             &ns->eth.tx_broadcast);
1127         i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1128                             pf->offset_loaded, &os->eth.tx_discards,
1129                             &ns->eth.tx_discards);
1130         /* GLPRT_TEPC not supported */
1131
1132         /* additional port specific stats */
1133         i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1134                             pf->offset_loaded, &os->tx_dropped_link_down,
1135                             &ns->tx_dropped_link_down);
1136         i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1137                             pf->offset_loaded, &os->crc_errors,
1138                             &ns->crc_errors);
1139         i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1140                             pf->offset_loaded, &os->illegal_bytes,
1141                             &ns->illegal_bytes);
1142         /* GLPRT_ERRBC not supported */
1143         i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1144                             pf->offset_loaded, &os->mac_local_faults,
1145                             &ns->mac_local_faults);
1146         i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1147                             pf->offset_loaded, &os->mac_remote_faults,
1148                             &ns->mac_remote_faults);
1149         i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1150                             pf->offset_loaded, &os->rx_length_errors,
1151                             &ns->rx_length_errors);
1152         i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1153                             pf->offset_loaded, &os->link_xon_rx,
1154                             &ns->link_xon_rx);
1155         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1156                             pf->offset_loaded, &os->link_xoff_rx,
1157                             &ns->link_xoff_rx);
1158         for (i = 0; i < 8; i++) {
1159                 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1160                                     pf->offset_loaded,
1161                                     &os->priority_xon_rx[i],
1162                                     &ns->priority_xon_rx[i]);
1163                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1164                                     pf->offset_loaded,
1165                                     &os->priority_xoff_rx[i],
1166                                     &ns->priority_xoff_rx[i]);
1167         }
1168         i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1169                             pf->offset_loaded, &os->link_xon_tx,
1170                             &ns->link_xon_tx);
1171         i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1172                             pf->offset_loaded, &os->link_xoff_tx,
1173                             &ns->link_xoff_tx);
1174         for (i = 0; i < 8; i++) {
1175                 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1176                                     pf->offset_loaded,
1177                                     &os->priority_xon_tx[i],
1178                                     &ns->priority_xon_tx[i]);
1179                 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1180                                     pf->offset_loaded,
1181                                     &os->priority_xoff_tx[i],
1182                                     &ns->priority_xoff_tx[i]);
1183                 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1184                                     pf->offset_loaded,
1185                                     &os->priority_xon_2_xoff[i],
1186                                     &ns->priority_xon_2_xoff[i]);
1187         }
1188         i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1189                             I40E_GLPRT_PRC64L(hw->port),
1190                             pf->offset_loaded, &os->rx_size_64,
1191                             &ns->rx_size_64);
1192         i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1193                             I40E_GLPRT_PRC127L(hw->port),
1194                             pf->offset_loaded, &os->rx_size_127,
1195                             &ns->rx_size_127);
1196         i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1197                             I40E_GLPRT_PRC255L(hw->port),
1198                             pf->offset_loaded, &os->rx_size_255,
1199                             &ns->rx_size_255);
1200         i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1201                             I40E_GLPRT_PRC511L(hw->port),
1202                             pf->offset_loaded, &os->rx_size_511,
1203                             &ns->rx_size_511);
1204         i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1205                             I40E_GLPRT_PRC1023L(hw->port),
1206                             pf->offset_loaded, &os->rx_size_1023,
1207                             &ns->rx_size_1023);
1208         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1209                             I40E_GLPRT_PRC1522L(hw->port),
1210                             pf->offset_loaded, &os->rx_size_1522,
1211                             &ns->rx_size_1522);
1212         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1213                             I40E_GLPRT_PRC9522L(hw->port),
1214                             pf->offset_loaded, &os->rx_size_big,
1215                             &ns->rx_size_big);
1216         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1217                             pf->offset_loaded, &os->rx_undersize,
1218                             &ns->rx_undersize);
1219         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1220                             pf->offset_loaded, &os->rx_fragments,
1221                             &ns->rx_fragments);
1222         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1223                             pf->offset_loaded, &os->rx_oversize,
1224                             &ns->rx_oversize);
1225         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1226                             pf->offset_loaded, &os->rx_jabber,
1227                             &ns->rx_jabber);
1228         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1229                             I40E_GLPRT_PTC64L(hw->port),
1230                             pf->offset_loaded, &os->tx_size_64,
1231                             &ns->tx_size_64);
1232         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1233                             I40E_GLPRT_PTC127L(hw->port),
1234                             pf->offset_loaded, &os->tx_size_127,
1235                             &ns->tx_size_127);
1236         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1237                             I40E_GLPRT_PTC255L(hw->port),
1238                             pf->offset_loaded, &os->tx_size_255,
1239                             &ns->tx_size_255);
1240         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1241                             I40E_GLPRT_PTC511L(hw->port),
1242                             pf->offset_loaded, &os->tx_size_511,
1243                             &ns->tx_size_511);
1244         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1245                             I40E_GLPRT_PTC1023L(hw->port),
1246                             pf->offset_loaded, &os->tx_size_1023,
1247                             &ns->tx_size_1023);
1248         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1249                             I40E_GLPRT_PTC1522L(hw->port),
1250                             pf->offset_loaded, &os->tx_size_1522,
1251                             &ns->tx_size_1522);
1252         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1253                             I40E_GLPRT_PTC9522L(hw->port),
1254                             pf->offset_loaded, &os->tx_size_big,
1255                             &ns->tx_size_big);
1256         /* GLPRT_MSPDC not supported */
1257         /* GLPRT_XEC not supported */
1258
1259         pf->offset_loaded = true;
1260
1261         if (pf->main_vsi)
1262                 i40e_update_vsi_stats(pf->main_vsi);
1263
1264         stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1265                                                 ns->eth.rx_broadcast;
1266         stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1267                                                 ns->eth.tx_broadcast;
1268         stats->ibytes   = ns->eth.rx_bytes;
1269         stats->obytes   = ns->eth.tx_bytes;
1270         stats->oerrors  = ns->eth.tx_errors;
1271         stats->imcasts  = ns->eth.rx_multicast;
1272
1273         /* Rx Errors */
1274         stats->ibadcrc  = ns->crc_errors;
1275         stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
1276                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1277         stats->imissed  = ns->eth.rx_discards;
1278         stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;
1279
1280         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1281         PMD_DRV_LOG(DEBUG, "rx_bytes:            %lu", ns->eth.rx_bytes);
1282         PMD_DRV_LOG(DEBUG, "rx_unicast:          %lu", ns->eth.rx_unicast);
1283         PMD_DRV_LOG(DEBUG, "rx_multicast:        %lu", ns->eth.rx_multicast);
1284         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %lu", ns->eth.rx_broadcast);
1285         PMD_DRV_LOG(DEBUG, "rx_discards:         %lu", ns->eth.rx_discards);
1286         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1287                     ns->eth.rx_unknown_protocol);
1288         PMD_DRV_LOG(DEBUG, "tx_bytes:            %lu", ns->eth.tx_bytes);
1289         PMD_DRV_LOG(DEBUG, "tx_unicast:          %lu", ns->eth.tx_unicast);
1290         PMD_DRV_LOG(DEBUG, "tx_multicast:        %lu", ns->eth.tx_multicast);
1291         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %lu", ns->eth.tx_broadcast);
1292         PMD_DRV_LOG(DEBUG, "tx_discards:         %lu", ns->eth.tx_discards);
1293         PMD_DRV_LOG(DEBUG, "tx_errors:           %lu", ns->eth.tx_errors);
1294
1295         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %lu",
1296                     ns->tx_dropped_link_down);
1297         PMD_DRV_LOG(DEBUG, "crc_errors:               %lu", ns->crc_errors);
1298         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %lu",
1299                     ns->illegal_bytes);
1300         PMD_DRV_LOG(DEBUG, "error_bytes:              %lu", ns->error_bytes);
1301         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %lu",
1302                     ns->mac_local_faults);
1303         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %lu",
1304                     ns->mac_remote_faults);
1305         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %lu",
1306                     ns->rx_length_errors);
1307         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %lu", ns->link_xon_rx);
1308         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %lu", ns->link_xoff_rx);
1309         for (i = 0; i < 8; i++) {
1310                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %lu",
1311                                 i, ns->priority_xon_rx[i]);
1312                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %lu",
1313                                 i, ns->priority_xoff_rx[i]);
1314         }
1315         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %lu", ns->link_xon_tx);
1316         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %lu", ns->link_xoff_tx);
1317         for (i = 0; i < 8; i++) {
1318                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %lu",
1319                                 i, ns->priority_xon_tx[i]);
1320                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %lu",
1321                                 i, ns->priority_xoff_tx[i]);
1322                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %lu",
1323                                 i, ns->priority_xon_2_xoff[i]);
1324         }
1325         PMD_DRV_LOG(DEBUG, "rx_size_64:               %lu", ns->rx_size_64);
1326         PMD_DRV_LOG(DEBUG, "rx_size_127:              %lu", ns->rx_size_127);
1327         PMD_DRV_LOG(DEBUG, "rx_size_255:              %lu", ns->rx_size_255);
1328         PMD_DRV_LOG(DEBUG, "rx_size_511:              %lu", ns->rx_size_511);
1329         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %lu", ns->rx_size_1023);
1330         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %lu", ns->rx_size_1522);
1331         PMD_DRV_LOG(DEBUG, "rx_size_big:              %lu", ns->rx_size_big);
1332         PMD_DRV_LOG(DEBUG, "rx_undersize:             %lu", ns->rx_undersize);
1333         PMD_DRV_LOG(DEBUG, "rx_fragments:             %lu", ns->rx_fragments);
1334         PMD_DRV_LOG(DEBUG, "rx_oversize:              %lu", ns->rx_oversize);
1335         PMD_DRV_LOG(DEBUG, "rx_jabber:                %lu", ns->rx_jabber);
1336         PMD_DRV_LOG(DEBUG, "tx_size_64:               %lu", ns->tx_size_64);
1337         PMD_DRV_LOG(DEBUG, "tx_size_127:              %lu", ns->tx_size_127);
1338         PMD_DRV_LOG(DEBUG, "tx_size_255:              %lu", ns->tx_size_255);
1339         PMD_DRV_LOG(DEBUG, "tx_size_511:              %lu", ns->tx_size_511);
1340         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %lu", ns->tx_size_1023);
1341         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %lu", ns->tx_size_1522);
1342         PMD_DRV_LOG(DEBUG, "tx_size_big:              %lu", ns->tx_size_big);
1343         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
1344                         ns->mac_short_packet_dropped);
1345         PMD_DRV_LOG(DEBUG, "checksum_error:           %lu",
1346                     ns->checksum_error);
1347         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1348 }
1349
1350 /* Reset the statistics */
1351 static void
1352 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1353 {
1354         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1355
1356         /* It results in reloading the start point of each counter */
1357         pf->offset_loaded = false;
1358 }
1359
1360 static int
1361 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1362                                  __rte_unused uint16_t queue_id,
1363                                  __rte_unused uint8_t stat_idx,
1364                                  __rte_unused uint8_t is_rx)
1365 {
1366         PMD_INIT_FUNC_TRACE();
1367
1368         return -ENOSYS;
1369 }
1370
1371 static void
1372 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1373 {
1374         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1375         struct i40e_vsi *vsi = pf->main_vsi;
1376
1377         dev_info->max_rx_queues = vsi->nb_qps;
1378         dev_info->max_tx_queues = vsi->nb_qps;
1379         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1380         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1381         dev_info->max_mac_addrs = vsi->max_macaddrs;
1382         dev_info->max_vfs = dev->pci_dev->max_vfs;
1383         dev_info->rx_offload_capa =
1384                 DEV_RX_OFFLOAD_VLAN_STRIP |
1385                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1386                 DEV_RX_OFFLOAD_UDP_CKSUM |
1387                 DEV_RX_OFFLOAD_TCP_CKSUM;
1388         dev_info->tx_offload_capa =
1389                 DEV_TX_OFFLOAD_VLAN_INSERT |
1390                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1391                 DEV_TX_OFFLOAD_UDP_CKSUM |
1392                 DEV_TX_OFFLOAD_TCP_CKSUM |
1393                 DEV_TX_OFFLOAD_SCTP_CKSUM;
1394
1395         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1396                 .rx_thresh = {
1397                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1398                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1399                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1400                 },
1401                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1402                 .rx_drop_en = 0,
1403         };
1404
1405         dev_info->default_txconf = (struct rte_eth_txconf) {
1406                 .tx_thresh = {
1407                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1408                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1409                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1410                 },
1411                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1412                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1413                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
1414         };
1415
1416 }
1417
1418 static int
1419 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1420 {
1421         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1422         struct i40e_vsi *vsi = pf->main_vsi;
1423         PMD_INIT_FUNC_TRACE();
1424
1425         if (on)
1426                 return i40e_vsi_add_vlan(vsi, vlan_id);
1427         else
1428                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1429 }
1430
/* Changing the VLAN TPID is not supported by this driver; the
 * callback only traces the call.
 */
static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
                   __rte_unused uint16_t tpid)
{
        PMD_INIT_FUNC_TRACE();
}
1437
1438 static void
1439 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1440 {
1441         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1442         struct i40e_vsi *vsi = pf->main_vsi;
1443
1444         if (mask & ETH_VLAN_STRIP_MASK) {
1445                 /* Enable or disable VLAN stripping */
1446                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1447                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1448                 else
1449                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1450         }
1451
1452         if (mask & ETH_VLAN_EXTEND_MASK) {
1453                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1454                         i40e_vsi_config_double_vlan(vsi, TRUE);
1455                 else
1456                         i40e_vsi_config_double_vlan(vsi, FALSE);
1457         }
1458 }
1459
/* Per-queue VLAN stripping is not supported by this driver; the
 * callback only traces the call.
 */
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
                          __rte_unused uint16_t queue,
                          __rte_unused int on)
{
        PMD_INIT_FUNC_TRACE();
}
1467
1468 static int
1469 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1470 {
1471         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1472         struct i40e_vsi *vsi = pf->main_vsi;
1473         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1474         struct i40e_vsi_vlan_pvid_info info;
1475
1476         memset(&info, 0, sizeof(info));
1477         info.on = on;
1478         if (info.on)
1479                 info.config.pvid = pvid;
1480         else {
1481                 info.config.reject.tagged =
1482                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1483                 info.config.reject.untagged =
1484                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1485         }
1486
1487         return i40e_vsi_vlan_pvid_set(vsi, &info);
1488 }
1489
1490 static int
1491 i40e_dev_led_on(struct rte_eth_dev *dev)
1492 {
1493         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1494         uint32_t mode = i40e_led_get(hw);
1495
1496         if (mode == 0)
1497                 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
1498
1499         return 0;
1500 }
1501
1502 static int
1503 i40e_dev_led_off(struct rte_eth_dev *dev)
1504 {
1505         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1506         uint32_t mode = i40e_led_get(hw);
1507
1508         if (mode != 0)
1509                 i40e_led_set(hw, 0, false);
1510
1511         return 0;
1512 }
1513
/* Link flow control configuration is not implemented; the callback
 * only traces the call and reports -ENOSYS.
 */
static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
                   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
1522
/* Priority flow control configuration is not implemented; the callback
 * only traces the call and reports -ENOSYS.
 */
static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
                            __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
        PMD_INIT_FUNC_TRACE();

        return -ENOSYS;
}
1531
1532 /* Add a MAC address, and update filters */
1533 static void
1534 i40e_macaddr_add(struct rte_eth_dev *dev,
1535                  struct ether_addr *mac_addr,
1536                  __attribute__((unused)) uint32_t index,
1537                  __attribute__((unused)) uint32_t pool)
1538 {
1539         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1540         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1541         struct i40e_vsi *vsi = pf->main_vsi;
1542         struct ether_addr old_mac;
1543         int ret;
1544
1545         if (!is_valid_assigned_ether_addr(mac_addr)) {
1546                 PMD_DRV_LOG(ERR, "Invalid ethernet address");
1547                 return;
1548         }
1549
1550         if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1551                 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
1552                 return;
1553         }
1554
1555         /* Write mac address */
1556         ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1557                                         mac_addr->addr_bytes, NULL);
1558         if (ret != I40E_SUCCESS) {
1559                 PMD_DRV_LOG(ERR, "Failed to write mac address");
1560                 return;
1561         }
1562
1563         (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1564         (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1565                         ETHER_ADDR_LEN);
1566
1567         ret = i40e_vsi_add_mac(vsi, mac_addr);
1568         if (ret != I40E_SUCCESS) {
1569                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1570                 return;
1571         }
1572
1573         ether_addr_copy(mac_addr, &pf->dev_addr);
1574         i40e_vsi_delete_mac(vsi, &old_mac);
1575 }
1576
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
        struct ether_addr *macaddr;
        int ret;
        struct i40e_hw *hw =
                I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Silently ignore out-of-range slots or slots holding no
         * valid assigned address.
         */
        if (index >= vsi->max_macaddrs)
                return;

        macaddr = &(data->mac_addrs[index]);
        if (!is_valid_assigned_ether_addr(macaddr))
                return;

        /* Revert hardware to the factory (permanent) MAC address */
        ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
                                        hw->mac.perm_addr, NULL);
        if (ret != I40E_SUCCESS) {
                PMD_DRV_LOG(ERR, "Failed to write mac address");
                return;
        }

        (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);

        /* NOTE(review): if this delete fails, hw->mac.addr has already
         * been reverted while the MACVLAN filter is still installed,
         * leaving the two out of sync — confirm whether that silent
         * return is intended.
         */
        ret = i40e_vsi_delete_mac(vsi, macaddr);
        if (ret != I40E_SUCCESS)
                return;

        /* Clear device address as it has been removed */
        if (is_same_ether_addr(&(pf->dev_addr), macaddr))
                memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
}
1613
/* Program the RSS redirection table from @reta_conf.  Entries whose
 * bit is set in mask_lo/mask_hi are written; untouched entries keep
 * their current hardware value.  Always returns 0.
 */
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta *reta_conf)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t lut, l;
        uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

        /* Each 32-bit HLUT register holds four 8-bit table entries */
        for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
                /* 4-bit update mask for this register, taken from the
                 * low or high half of the caller's entry mask.
                 */
                if (i < max)
                        mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
                else
                        mask = (uint8_t)((reta_conf->mask_hi >>
                                                (i - max)) & 0xF);

                if (!mask)
                        continue;

                /* Skip the read when all four lanes will be replaced */
                if (mask == 0xF)
                        l = 0;
                else
                        l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));

                /* Merge new entries with preserved old lanes */
                for (j = 0, lut = 0; j < 4; j++) {
                        if (mask & (0x1 << j))
                                lut |= reta_conf->reta[i + j] << (8 * j);
                        else
                                lut |= l & (0xFF << (8 * j));
                }
                I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
        }

        return 0;
}
1648
1649 static int
1650 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1651                         struct rte_eth_rss_reta *reta_conf)
1652 {
1653         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1654         uint32_t lut;
1655         uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1656
1657         for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1658                 if (i < max)
1659                         mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1660                 else
1661                         mask = (uint8_t)((reta_conf->mask_hi >>
1662                                                 (i - max)) & 0xF);
1663
1664                 if (!mask)
1665                         continue;
1666
1667                 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1668                 for (j = 0; j < 4; j++) {
1669                         if (mask & (0x1 << j))
1670                                 reta_conf->reta[i + j] =
1671                                         (uint8_t)((lut >> (8 * j)) & 0xFF);
1672                 }
1673         }
1674
1675         return 0;
1676 }
1677
/**
 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 *
 * Backs the request with a DPDK memzone so the area is suitable for
 * device DMA.  Returns I40E_SUCCESS, I40E_ERR_PARAM on a NULL @mem, or
 * I40E_ERR_NO_MEMORY when no memzone could be reserved.
 **/
enum i40e_status_code
i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
                        struct i40e_dma_mem *mem,
                        u64 size,
                        u32 alignment)
{
        /* Monotonic counter keeps memzone names unique in this process */
        static uint64_t id = 0;
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];

        if (!mem)
                return I40E_ERR_PARAM;

        id++;
        snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
#ifdef RTE_LIBRTE_XEN_DOM0
        /* Under Xen dom0, bound the zone to a 2M page so the machine
         * address range stays contiguous.
         */
        mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
                                                        RTE_PGSIZE_2M);
#else
        mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
#endif
        if (!mz)
                return I40E_ERR_NO_MEMORY;

        mem->id = id;
        mem->size = size;
        mem->va = mz->addr;
#ifdef RTE_LIBRTE_XEN_DOM0
        /* Translate guest-physical to machine address for the device */
        mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
#else
        mem->pa = mz->phys_addr;
#endif

        return I40E_SUCCESS;
}
1720
1721 /**
1722  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1723  * @hw:   pointer to the HW structure
1724  * @mem:  ptr to mem struct to free
1725  **/
1726 enum i40e_status_code
1727 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1728                     struct i40e_dma_mem *mem)
1729 {
1730         if (!mem || !mem->va)
1731                 return I40E_ERR_PARAM;
1732
1733         mem->va = NULL;
1734         mem->pa = (u64)0;
1735
1736         return I40E_SUCCESS;
1737 }
1738
1739 /**
1740  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1741  * @hw:   pointer to the HW structure
1742  * @mem:  pointer to mem struct to fill out
1743  * @size: size of memory requested
1744  **/
1745 enum i40e_status_code
1746 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1747                          struct i40e_virt_mem *mem,
1748                          u32 size)
1749 {
1750         if (!mem)
1751                 return I40E_ERR_PARAM;
1752
1753         mem->size = size;
1754         mem->va = rte_zmalloc("i40e", size, 0);
1755
1756         if (mem->va)
1757                 return I40E_SUCCESS;
1758         else
1759                 return I40E_ERR_NO_MEMORY;
1760 }
1761
1762 /**
1763  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1764  * @hw:   pointer to the HW structure
1765  * @mem:  pointer to mem struct to free
1766  **/
1767 enum i40e_status_code
1768 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1769                      struct i40e_virt_mem *mem)
1770 {
1771         if (!mem)
1772                 return I40E_ERR_PARAM;
1773
1774         rte_free(mem->va);
1775         mem->va = NULL;
1776
1777         return I40E_SUCCESS;
1778 }
1779
/* Shared-code hook: initialize @sp on top of a DPDK spinlock. */
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_init(&sp->spinlock);
}
1785
/* Shared-code hook: take @sp (blocks until the lock is acquired). */
void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_lock(&sp->spinlock);
}
1791
/* Shared-code hook: release @sp. */
void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
        rte_spinlock_unlock(&sp->spinlock);
}
1797
/* Shared-code hook: nothing to tear down for a DPDK spinlock. */
void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
        return;
}
1803
1804 /**
1805  * Get the hardware capabilities, which will be parsed
1806  * and saved into struct i40e_hw.
1807  */
1808 static int
1809 i40e_get_cap(struct i40e_hw *hw)
1810 {
1811         struct i40e_aqc_list_capabilities_element_resp *buf;
1812         uint16_t len, size = 0;
1813         int ret;
1814
1815         /* Calculate a huge enough buff for saving response data temporarily */
1816         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1817                                                 I40E_MAX_CAP_ELE_NUM;
1818         buf = rte_zmalloc("i40e", len, 0);
1819         if (!buf) {
1820                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1821                 return I40E_ERR_NO_MEMORY;
1822         }
1823
1824         /* Get, parse the capabilities and save it to hw */
1825         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1826                         i40e_aqc_opc_list_func_capabilities, NULL);
1827         if (ret != I40E_SUCCESS)
1828                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1829
1830         /* Free the temporary buffer after being used */
1831         rte_free(buf);
1832
1833         return ret;
1834 }
1835
/* Derive the PF's queue/VSI budget (LAN, SRIOV VFs, VMDQ, flow
 * director) from firmware capabilities and validate it against the
 * hardware limits.  Returns I40E_SUCCESS or -EINVAL when the requested
 * configuration cannot be satisfied.
 */
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t sum_queues = 0, sum_vsis;

        /* First check if FW support SRIOV */
        if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
                PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
                return -EINVAL;
        }

        pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
        pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
        PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
        /* Allocate queues for pf */
        if (hw->func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS;
                /* LAN queues: bounded by the TX queue count and by the
                 * range addressable through the RSS lookup table, then
                 * rounded down to a power of two.
                 */
                pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
                        (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
                pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
        } else
                pf->lan_nb_qps = 1;
        sum_queues = pf->lan_nb_qps;
        /* Default VSI is not counted in */
        sum_vsis = 0;
        PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);

        if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
                pf->flags |= I40E_FLAG_SRIOV;
                pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
                if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
                        PMD_INIT_LOG(ERR, "Config VF number %u, "
                                     "max supported %u.",
                                     dev->pci_dev->max_vfs,
                                     hw->func_caps.num_vfs);
                        return -EINVAL;
                }
                if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
                        PMD_INIT_LOG(ERR, "FVL VF queue %u, "
                                     "max support %u queues.",
                                     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
                        return -EINVAL;
                }
                pf->vf_num = dev->pci_dev->max_vfs;
                /* Each VF gets its own VSI with vf_nb_qps queue pairs */
                sum_queues += pf->vf_nb_qps * pf->vf_num;
                sum_vsis   += pf->vf_num;
                PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
                             pf->vf_num, pf->vf_nb_qps);
        } else
                pf->vf_num = 0;

        if (hw->func_caps.vmdq) {
                pf->flags |= I40E_FLAG_VMDQ;
                pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
                sum_queues += pf->vmdq_nb_qps;
                sum_vsis += 1;
                PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
        }

        if (hw->func_caps.fd) {
                pf->flags |= I40E_FLAG_FDIR;
                pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
                /**
                 * Each flow director consumes one VSI and one queue,
                 * but can't calculate out predictably here.
                 */
        }

        /* The budget must fit in both the VSI and RX queue limits */
        if (sum_vsis > pf->max_num_vsi ||
                sum_queues > hw->func_caps.num_rx_qp) {
                PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
                PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
                             pf->max_num_vsi, sum_vsis);
                PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
                             hw->func_caps.num_rx_qp, sum_queues);
                return -EINVAL;
        }

        /* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
         * cause */
        if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
                PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
                             sum_vsis, hw->func_caps.num_msix_vectors);
                return -EINVAL;
        }
        return I40E_SUCCESS;
}
1925
1926 static int
1927 i40e_pf_get_switch_config(struct i40e_pf *pf)
1928 {
1929         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1930         struct i40e_aqc_get_switch_config_resp *switch_config;
1931         struct i40e_aqc_switch_config_element_resp *element;
1932         uint16_t start_seid = 0, num_reported;
1933         int ret;
1934
1935         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
1936                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
1937         if (!switch_config) {
1938                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
1939                 return -ENOMEM;
1940         }
1941
1942         /* Get the switch configurations */
1943         ret = i40e_aq_get_switch_config(hw, switch_config,
1944                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
1945         if (ret != I40E_SUCCESS) {
1946                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
1947                 goto fail;
1948         }
1949         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
1950         if (num_reported != 1) { /* The number should be 1 */
1951                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
1952                 goto fail;
1953         }
1954
1955         /* Parse the switch configuration elements */
1956         element = &(switch_config->element[0]);
1957         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
1958                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
1959                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
1960         } else
1961                 PMD_DRV_LOG(INFO, "Unknown element type");
1962
1963 fail:
1964         rte_free(switch_config);
1965
1966         return ret;
1967 }
1968
1969 static int
1970 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
1971                         uint32_t num)
1972 {
1973         struct pool_entry *entry;
1974
1975         if (pool == NULL || num == 0)
1976                 return -EINVAL;
1977
1978         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
1979         if (entry == NULL) {
1980                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
1981                 return -ENOMEM;
1982         }
1983
1984         /* queue heap initialize */
1985         pool->num_free = num;
1986         pool->num_alloc = 0;
1987         pool->base = base;
1988         LIST_INIT(&pool->alloc_list);
1989         LIST_INIT(&pool->free_list);
1990
1991         /* Initialize element  */
1992         entry->base = 0;
1993         entry->len = num;
1994
1995         LIST_INSERT_HEAD(&pool->free_list, entry, next);
1996         return 0;
1997 }
1998
1999 static void
2000 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2001 {
2002         struct pool_entry *entry;
2003
2004         if (pool == NULL)
2005                 return;
2006
2007         LIST_FOREACH(entry, &pool->alloc_list, next) {
2008                 LIST_REMOVE(entry, next);
2009                 rte_free(entry);
2010         }
2011
2012         LIST_FOREACH(entry, &pool->free_list, next) {
2013                 LIST_REMOVE(entry, next);
2014                 rte_free(entry);
2015         }
2016
2017         pool->num_free = 0;
2018         pool->num_alloc = 0;
2019         pool->base = 0;
2020         LIST_INIT(&pool->alloc_list);
2021         LIST_INIT(&pool->free_list);
2022 }
2023
2024 static int
2025 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2026                        uint32_t base)
2027 {
2028         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2029         uint32_t pool_offset;
2030         int insert;
2031
2032         if (pool == NULL) {
2033                 PMD_DRV_LOG(ERR, "Invalid parameter");
2034                 return -EINVAL;
2035         }
2036
2037         pool_offset = base - pool->base;
2038         /* Lookup in alloc list */
2039         LIST_FOREACH(entry, &pool->alloc_list, next) {
2040                 if (entry->base == pool_offset) {
2041                         valid_entry = entry;
2042                         LIST_REMOVE(entry, next);
2043                         break;
2044                 }
2045         }
2046
2047         /* Not find, return */
2048         if (valid_entry == NULL) {
2049                 PMD_DRV_LOG(ERR, "Failed to find entry");
2050                 return -EINVAL;
2051         }
2052
2053         /**
2054          * Found it, move it to free list  and try to merge.
2055          * In order to make merge easier, always sort it by qbase.
2056          * Find adjacent prev and last entries.
2057          */
2058         prev = next = NULL;
2059         LIST_FOREACH(entry, &pool->free_list, next) {
2060                 if (entry->base > valid_entry->base) {
2061                         next = entry;
2062                         break;
2063                 }
2064                 prev = entry;
2065         }
2066
2067         insert = 0;
2068         /* Try to merge with next one*/
2069         if (next != NULL) {
2070                 /* Merge with next one */
2071                 if (valid_entry->base + valid_entry->len == next->base) {
2072                         next->base = valid_entry->base;
2073                         next->len += valid_entry->len;
2074                         rte_free(valid_entry);
2075                         valid_entry = next;
2076                         insert = 1;
2077                 }
2078         }
2079
2080         if (prev != NULL) {
2081                 /* Merge with previous one */
2082                 if (prev->base + prev->len == valid_entry->base) {
2083                         prev->len += valid_entry->len;
2084                         /* If it merge with next one, remove next node */
2085                         if (insert == 1) {
2086                                 LIST_REMOVE(valid_entry, next);
2087                                 rte_free(valid_entry);
2088                         } else {
2089                                 rte_free(valid_entry);
2090                                 insert = 1;
2091                         }
2092                 }
2093         }
2094
2095         /* Not find any entry to merge, insert */
2096         if (insert == 0) {
2097                 if (prev != NULL)
2098                         LIST_INSERT_AFTER(prev, valid_entry, next);
2099                 else if (next != NULL)
2100                         LIST_INSERT_BEFORE(next, valid_entry, next);
2101                 else /* It's empty list, insert to head */
2102                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2103         }
2104
2105         pool->num_free += valid_entry->len;
2106         pool->num_alloc -= valid_entry->len;
2107
2108         return 0;
2109 }
2110
2111 static int
2112 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2113                        uint16_t num)
2114 {
2115         struct pool_entry *entry, *valid_entry;
2116
2117         if (pool == NULL || num == 0) {
2118                 PMD_DRV_LOG(ERR, "Invalid parameter");
2119                 return -EINVAL;
2120         }
2121
2122         if (pool->num_free < num) {
2123                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2124                             num, pool->num_free);
2125                 return -ENOMEM;
2126         }
2127
2128         valid_entry = NULL;
2129         /* Lookup  in free list and find most fit one */
2130         LIST_FOREACH(entry, &pool->free_list, next) {
2131                 if (entry->len >= num) {
2132                         /* Find best one */
2133                         if (entry->len == num) {
2134                                 valid_entry = entry;
2135                                 break;
2136                         }
2137                         if (valid_entry == NULL || valid_entry->len > entry->len)
2138                                 valid_entry = entry;
2139                 }
2140         }
2141
2142         /* Not find one to satisfy the request, return */
2143         if (valid_entry == NULL) {
2144                 PMD_DRV_LOG(ERR, "No valid entry found");
2145                 return -ENOMEM;
2146         }
2147         /**
2148          * The entry have equal queue number as requested,
2149          * remove it from alloc_list.
2150          */
2151         if (valid_entry->len == num) {
2152                 LIST_REMOVE(valid_entry, next);
2153         } else {
2154                 /**
2155                  * The entry have more numbers than requested,
2156                  * create a new entry for alloc_list and minus its
2157                  * queue base and number in free_list.
2158                  */
2159                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2160                 if (entry == NULL) {
2161                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2162                                     "resource pool");
2163                         return -ENOMEM;
2164                 }
2165                 entry->base = valid_entry->base;
2166                 entry->len = num;
2167                 valid_entry->base += num;
2168                 valid_entry->len -= num;
2169                 valid_entry = entry;
2170         }
2171
2172         /* Insert it into alloc list, not sorted */
2173         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2174
2175         pool->num_free -= valid_entry->len;
2176         pool->num_alloc += valid_entry->len;
2177
2178         return (valid_entry->base + pool->base);
2179 }
2180
/**
 * bitmap_is_subset - Check whether src2 is subset of src1
 *
 * Returns 1 when every bit set in @src2 is also set in @src1,
 * 0 otherwise.
 **/
static inline int
bitmap_is_subset(uint8_t src1, uint8_t src2)
{
	return (src1 & src2) == src2;
}
2189
/* Validate a requested traffic-class bitmap against the hardware's
 * DCB capabilities.  Returns I40E_SUCCESS or -EINVAL.
 */
static int
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
        struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

        /* If DCB is not supported, only default TC is supported */
        if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
                PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
                return -EINVAL;
        }

        /* Every requested TC must be among the TCs enabled in HW */
        if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
                PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
                            "HW support 0x%x", hw->func_caps.enabled_tcmap,
                            enabled_tcmap);
                return -EINVAL;
        }
        return I40E_SUCCESS;
}
2209
/* Configure port VLAN (PVID) insertion — or, when PVID is off, the
 * tagged/untagged acceptance policy — on @vsi and commit the change
 * to firmware.  Returns I40E_SUCCESS, I40E_ERR_PARAM, or the AQ
 * status from the VSI update.
 */
int
i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
                                struct i40e_vsi_vlan_pvid_info *info)
{
        struct i40e_hw *hw;
        struct i40e_vsi_context ctxt;
        uint8_t vlan_flags = 0;
        int ret;

        if (vsi == NULL || info == NULL) {
                PMD_DRV_LOG(ERR, "invalid parameters");
                return I40E_ERR_PARAM;
        }

        if (info->on) {
                vsi->info.pvid = info->config.pvid;
                /**
                 * If insert pvid is enabled, only tagged pkts are
                 * allowed to be sent out.
                 */
                vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
                                I40E_AQ_VSI_PVLAN_MODE_TAGGED;
        } else {
                vsi->info.pvid = 0;
                /* PVID off: accept each class unless explicitly rejected */
                if (info->config.reject.tagged == 0)
                        vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;

                if (info->config.reject.untagged == 0)
                        vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
        }
        /* Replace only the insert-PVID and mode bits; keep the rest */
        vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
                                        I40E_AQ_VSI_PVLAN_MODE_MASK);
        vsi->info.port_vlan_flags |= vlan_flags;
        /* Tell firmware which section of the context is being updated */
        vsi->info.valid_sections =
                rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
        memset(&ctxt, 0, sizeof(ctxt));
        (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
        ctxt.seid = vsi->seid;

        hw = I40E_VSI_TO_HW(vsi);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret != I40E_SUCCESS)
                PMD_DRV_LOG(ERR, "Failed to update VSI params");

        return ret;
}
2256
2257 static int
2258 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2259 {
2260         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2261         int i, ret;
2262         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2263
2264         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2265         if (ret != I40E_SUCCESS)
2266                 return ret;
2267
2268         if (!vsi->seid) {
2269                 PMD_DRV_LOG(ERR, "seid not valid");
2270                 return -EINVAL;
2271         }
2272
2273         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2274         tc_bw_data.tc_valid_bits = enabled_tcmap;
2275         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2276                 tc_bw_data.tc_bw_credits[i] =
2277                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2278
2279         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2280         if (ret != I40E_SUCCESS) {
2281                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2282                 return ret;
2283         }
2284
2285         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2286                                         sizeof(vsi->info.qs_handle));
2287         return I40E_SUCCESS;
2288 }
2289
2290 static int
2291 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2292                                  struct i40e_aqc_vsi_properties_data *info,
2293                                  uint8_t enabled_tcmap)
2294 {
2295         int ret, total_tc = 0, i;
2296         uint16_t qpnum_per_tc, bsf, qp_idx;
2297
2298         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2299         if (ret != I40E_SUCCESS)
2300                 return ret;
2301
2302         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2303                 if (enabled_tcmap & (1 << i))
2304                         total_tc++;
2305         vsi->enabled_tc = enabled_tcmap;
2306
2307         /* Number of queues per enabled TC */
2308         qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2309         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2310         bsf = rte_bsf32(qpnum_per_tc);
2311
2312         /* Adjust the queue number to actual queues that can be applied */
2313         vsi->nb_qps = qpnum_per_tc * total_tc;
2314
2315         /**
2316          * Configure TC and queue mapping parameters, for enabled TC,
2317          * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2318          * default queue will serve it.
2319          */
2320         qp_idx = 0;
2321         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2322                 if (vsi->enabled_tc & (1 << i)) {
2323                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2324                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2325                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2326                         qp_idx += qpnum_per_tc;
2327                 } else
2328                         info->tc_mapping[i] = 0;
2329         }
2330
2331         /* Associate queue number with VSI */
2332         if (vsi->type == I40E_VSI_SRIOV) {
2333                 info->mapping_flags |=
2334                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2335                 for (i = 0; i < vsi->nb_qps; i++)
2336                         info->queue_mapping[i] =
2337                                 rte_cpu_to_le_16(vsi->base_queue + i);
2338         } else {
2339                 info->mapping_flags |=
2340                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2341                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2342         }
2343         info->valid_sections =
2344                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2345
2346         return I40E_SUCCESS;
2347 }
2348
2349 static int
2350 i40e_veb_release(struct i40e_veb *veb)
2351 {
2352         struct i40e_vsi *vsi;
2353         struct i40e_hw *hw;
2354
2355         if (veb == NULL || veb->associate_vsi == NULL)
2356                 return -EINVAL;
2357
2358         if (!TAILQ_EMPTY(&veb->head)) {
2359                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2360                 return -EACCES;
2361         }
2362
2363         vsi = veb->associate_vsi;
2364         hw = I40E_VSI_TO_HW(vsi);
2365
2366         vsi->uplink_seid = veb->uplink_seid;
2367         i40e_aq_delete_element(hw, veb->seid, NULL);
2368         rte_free(veb);
2369         vsi->veb = NULL;
2370         return I40E_SUCCESS;
2371 }
2372
2373 /* Setup a veb */
2374 static struct i40e_veb *
2375 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2376 {
2377         struct i40e_veb *veb;
2378         int ret;
2379         struct i40e_hw *hw;
2380
2381         if (NULL == pf || vsi == NULL) {
2382                 PMD_DRV_LOG(ERR, "veb setup failed, "
2383                             "associated VSI shouldn't null");
2384                 return NULL;
2385         }
2386         hw = I40E_PF_TO_HW(pf);
2387
2388         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2389         if (!veb) {
2390                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2391                 goto fail;
2392         }
2393
2394         veb->associate_vsi = vsi;
2395         TAILQ_INIT(&veb->head);
2396         veb->uplink_seid = vsi->uplink_seid;
2397
2398         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2399                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2400
2401         if (ret != I40E_SUCCESS) {
2402                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2403                             hw->aq.asq_last_status);
2404                 goto fail;
2405         }
2406
2407         /* get statistics index */
2408         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2409                                 &veb->stats_idx, NULL, NULL, NULL);
2410         if (ret != I40E_SUCCESS) {
2411                 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2412                             hw->aq.asq_last_status);
2413                 goto fail;
2414         }
2415
2416         /* Get VEB bandwidth, to be implemented */
2417         /* Now associated vsi binding to the VEB, set uplink to this VEB */
2418         vsi->uplink_seid = veb->seid;
2419
2420         return veb;
2421 fail:
2422         rte_free(veb);
2423         return NULL;
2424 }
2425
2426 int
2427 i40e_vsi_release(struct i40e_vsi *vsi)
2428 {
2429         struct i40e_pf *pf;
2430         struct i40e_hw *hw;
2431         struct i40e_vsi_list *vsi_list;
2432         int ret;
2433         struct i40e_mac_filter *f;
2434
2435         if (!vsi)
2436                 return I40E_SUCCESS;
2437
2438         pf = I40E_VSI_TO_PF(vsi);
2439         hw = I40E_VSI_TO_HW(vsi);
2440
2441         /* VSI has child to attach, release child first */
2442         if (vsi->veb) {
2443                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2444                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2445                                 return -1;
2446                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2447                 }
2448                 i40e_veb_release(vsi->veb);
2449         }
2450
2451         /* Remove all macvlan filters of the VSI */
2452         i40e_vsi_remove_all_macvlan_filter(vsi);
2453         TAILQ_FOREACH(f, &vsi->mac_list, next)
2454                 rte_free(f);
2455
2456         if (vsi->type != I40E_VSI_MAIN) {
2457                 /* Remove vsi from parent's sibling list */
2458                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2459                         PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2460                         return I40E_ERR_PARAM;
2461                 }
2462                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2463                                 &vsi->sib_vsi_list, list);
2464
2465                 /* Remove all switch element of the VSI */
2466                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2467                 if (ret != I40E_SUCCESS)
2468                         PMD_DRV_LOG(ERR, "Failed to delete element");
2469         }
2470         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2471
2472         if (vsi->type != I40E_VSI_SRIOV)
2473                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2474         rte_free(vsi);
2475
2476         return I40E_SUCCESS;
2477 }
2478
2479 static int
2480 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2481 {
2482         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2483         struct i40e_aqc_remove_macvlan_element_data def_filter;
2484         int ret;
2485
2486         if (vsi->type != I40E_VSI_MAIN)
2487                 return I40E_ERR_CONFIG;
2488         memset(&def_filter, 0, sizeof(def_filter));
2489         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2490                                         ETH_ADDR_LEN);
2491         def_filter.vlan_tag = 0;
2492         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2493                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2494         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2495         if (ret != I40E_SUCCESS) {
2496                 struct i40e_mac_filter *f;
2497
2498                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2499                             "macvlan filter");
2500                 /* It needs to add the permanent mac into mac list */
2501                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2502                 if (f == NULL) {
2503                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2504                         return I40E_ERR_NO_MEMORY;
2505                 }
2506                 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2507                                 ETH_ADDR_LEN);
2508                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2509                 vsi->mac_num++;
2510
2511                 return ret;
2512         }
2513
2514         return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2515 }
2516
/**
 * Query the VSI bandwidth and per-TC ETS/SLA configuration from the
 * firmware and log them at INFO level. The data is only printed, not
 * stored anywhere.
 */
static int
i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_bw_config_resp bw_config;
	struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
	struct i40e_hw *hw = &vsi->adapter->hw;
	i40e_status ret;
	int i;

	memset(&bw_config, 0, sizeof(bw_config));
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
			    hw->aq.asq_last_status);
		return ret;
	}

	memset(&ets_sla_config, 0, sizeof(ets_sla_config));
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
					&ets_sla_config, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
			    "configuration %u", hw->aq.asq_last_status);
		return ret;
	}

	/* Not store the info yet, just print out */
	PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
	PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
			    ets_sla_config.share_credits[i]);
		PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i]));
		/**
		 * NOTE(review): credits[i / 4] >> (i * 4) shifts a 16-bit
		 * value by up to 28 bits (i up to 7), which looks wrong;
		 * unpacking 4-bit fields would use (i % 4) * 4 and probably
		 * a packed max-credits array. Verify against the AQ
		 * response layout before relying on this log line.
		 */
		PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
			    rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
			    (i * 4));
	}

	return 0;
}
2558
/* Setup a VSI */
/**
 * Create and configure a VSI of the given type.
 *
 * Allocates the software object, carves queue and MSIX resources out
 * of the PF pools, fills or updates the VSI properties through the
 * admin queue, and installs a broadcast MAC filter. For the main VSI
 * the firmware's existing default VSI is re-configured in place; for
 * SRIOV VSIs a new switch element is added under the uplink VSI's VEB
 * (creating that VEB first if needed).
 *
 * @param pf          physical function the VSI belongs to
 * @param type        VSI type; only MAIN and SRIOV are handled here
 * @param uplink_vsi  parent VSI; must be NULL for MAIN, non-NULL otherwise
 * @param user_param  type-specific parameter (used as VF index for SRIOV)
 * @return the new VSI on success, NULL on failure
 */
struct i40e_vsi *
i40e_vsi_setup(struct i40e_pf *pf,
	       enum i40e_vsi_type type,
	       struct i40e_vsi *uplink_vsi,
	       uint16_t user_param)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int ret;
	struct i40e_vsi_context ctxt;
	struct ether_addr broadcast =
		{.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};

	/* Non-main VSIs must hang off an uplink; main must not */
	if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
		PMD_DRV_LOG(ERR, "VSI setup failed, "
			    "VSI link shouldn't be NULL");
		return NULL;
	}

	if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
		PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
			    "uplink VSI should be NULL");
		return NULL;
	}

	/* If uplink vsi didn't setup VEB, create one first */
	if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
		uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);

		if (NULL == uplink_vsi->veb) {
			PMD_DRV_LOG(ERR, "VEB setup failed");
			return NULL;
		}
	}

	vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
		return NULL;
	}
	TAILQ_INIT(&vsi->mac_list);
	vsi->type = type;
	vsi->adapter = I40E_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
	vsi->parent_vsi = uplink_vsi;
	vsi->user_param = user_param;
	/* Allocate queues */
	switch (vsi->type) {
	case I40E_VSI_MAIN  :
		vsi->nb_qps = pf->lan_nb_qps;
		break;
	case I40E_VSI_SRIOV :
		vsi->nb_qps = pf->vf_nb_qps;
		break;
	default:
		goto fail_mem;
	}
	/* Reserve a contiguous block of queue pairs from the PF pool */
	ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
				vsi->seid, ret);
		goto fail_mem;
	}
	vsi->base_queue = ret;

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type != I40E_VSI_SRIOV) {
		ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
			goto fail_queue_alloc;
		}
		vsi->msix_intr = ret;
	} else
		vsi->msix_intr = 0;
	/* Add VSI */
	if (type == I40E_VSI_MAIN) {
		/* For main VSI, no need to add since it's default one */
		vsi->uplink_seid = pf->mac_seid;
		vsi->seid = pf->main_vsi_seid;
		/* Bind queues with specific MSIX interrupt */
		/**
		 * Needs 2 interrupt at least, one for misc cause which will
		 * enabled from OS side, Another for queues binding the
		 * interrupt from device side only.
		 */

		/* Get default VSI parameters from hardware */
		memset(&ctxt, 0, sizeof(ctxt));
		ctxt.seid = vsi->seid;
		ctxt.pf_num = hw->pf_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to get VSI params");
			goto fail_msix_alloc;
		}
		(void)rte_memcpy(&vsi->info, &ctxt.info,
			sizeof(struct i40e_aqc_vsi_properties_data));
		vsi->vsi_id = ctxt.vsi_number;
		vsi->info.valid_sections = 0;

		/* Configure tc, enabled TC0 only */
		if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
			I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
			goto fail_msix_alloc;
		}

		/* TC, queue mapping */
		memset(&ctxt, 0, sizeof(ctxt));
		vsi->info.valid_sections |=
			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
		vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
					I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
		(void)rte_memcpy(&ctxt.info, &vsi->info,
			sizeof(struct i40e_aqc_vsi_properties_data));
		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
						I40E_DEFAULT_TCMAP);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to configure "
				    "TC queue mapping");
			goto fail_msix_alloc;
		}
		ctxt.seid = vsi->seid;
		ctxt.pf_num = hw->pf_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.vf_num = 0;

		/* Update VSI parameters */
		ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to update VSI params");
			goto fail_msix_alloc;
		}

		/* Cache the mapping the firmware actually accepted */
		(void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
						sizeof(vsi->info.tc_mapping));
		(void)rte_memcpy(&vsi->info.queue_mapping,
				&ctxt.info.queue_mapping,
			sizeof(vsi->info.queue_mapping));
		vsi->info.mapping_flags = ctxt.info.mapping_flags;
		vsi->info.valid_sections = 0;

		(void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
				ETH_ADDR_LEN);

		/**
		 * Updating default filter settings are necessary to prevent
		 * reception of tagged packets.
		 * Some old firmware configurations load a default macvlan
		 * filter which accepts both tagged and untagged packets.
		 * The updating is to use a normal filter instead if needed.
		 * For NVM 4.2.2 or after, the updating is not needed anymore.
		 * The firmware with correct configurations load the default
		 * macvlan filter which is expected and cannot be removed.
		 */
		i40e_update_default_filter_setting(vsi);
	} else if (type == I40E_VSI_SRIOV) {
		memset(&ctxt, 0, sizeof(ctxt));
		/**
		 * For other VSI, the uplink_seid equals to uplink VSI's
		 * uplink_seid since they share same VEB
		 */
		vsi->uplink_seid = uplink_vsi->uplink_seid;
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = 0x1;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* Configure switch ID */
		ctxt.info.valid_sections |=
			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
			rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		/* Configure port/vlan */
		ctxt.info.valid_sections |=
			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
						I40E_DEFAULT_TCMAP);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to configure "
				    "TC queue mapping");
			goto fail_msix_alloc;
		}
		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
		ctxt.info.valid_sections |=
			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
		/**
		 * Since VSI is not created yet, only configure parameter,
		 * will add vsi below.
		 */
	}
	else {
		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
		goto fail_msix_alloc;
	}

	/* Non-main VSIs must be added as new switch elements */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
				    hw->aq.asq_last_status);
			goto fail_msix_alloc;
		}
		memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->vsi_id = ctxt.vsi_number;
		/* Link the new VSI into its parent VEB's children list */
		vsi->sib_vsi_list.vsi = vsi;
		TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
				&vsi->sib_vsi_list, list);
	}

	/* MAC/VLAN configuration */
	ret = i40e_vsi_add_mac(vsi, &broadcast);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
		goto fail_msix_alloc;
	}

	/* Get VSI BW information */
	i40e_vsi_dump_bw_config(vsi);
	return vsi;
fail_msix_alloc:
	i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
fail_queue_alloc:
	i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
fail_mem:
	rte_free(vsi);
	return NULL;
}
2795
2796 /* Configure vlan stripping on or off */
2797 int
2798 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2799 {
2800         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2801         struct i40e_vsi_context ctxt;
2802         uint8_t vlan_flags;
2803         int ret = I40E_SUCCESS;
2804
2805         /* Check if it has been already on or off */
2806         if (vsi->info.valid_sections &
2807                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2808                 if (on) {
2809                         if ((vsi->info.port_vlan_flags &
2810                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2811                                 return 0; /* already on */
2812                 } else {
2813                         if ((vsi->info.port_vlan_flags &
2814                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2815                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2816                                 return 0; /* already off */
2817                 }
2818         }
2819
2820         if (on)
2821                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2822         else
2823                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2824         vsi->info.valid_sections =
2825                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2826         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2827         vsi->info.port_vlan_flags |= vlan_flags;
2828         ctxt.seid = vsi->seid;
2829         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2830         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2831         if (ret)
2832                 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2833                             on ? "enable" : "disable");
2834
2835         return ret;
2836 }
2837
2838 static int
2839 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2840 {
2841         struct rte_eth_dev_data *data = dev->data;
2842         int ret;
2843
2844         /* Apply vlan offload setting */
2845         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2846
2847         /* Apply double-vlan setting, not implemented yet */
2848
2849         /* Apply pvid setting */
2850         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2851                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
2852         if (ret)
2853                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
2854
2855         return ret;
2856 }
2857
2858 static int
2859 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2860 {
2861         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2862
2863         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2864 }
2865
2866 static int
2867 i40e_update_flow_control(struct i40e_hw *hw)
2868 {
2869 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2870         struct i40e_link_status link_status;
2871         uint32_t rxfc = 0, txfc = 0, reg;
2872         uint8_t an_info;
2873         int ret;
2874
2875         memset(&link_status, 0, sizeof(link_status));
2876         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2877         if (ret != I40E_SUCCESS) {
2878                 PMD_DRV_LOG(ERR, "Failed to get link status information");
2879                 goto write_reg; /* Disable flow control */
2880         }
2881
2882         an_info = hw->phy.link_info.an_info;
2883         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2884                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
2885                 ret = I40E_ERR_NOT_READY;
2886                 goto write_reg; /* Disable flow control */
2887         }
2888         /**
2889          * If link auto negotiation is enabled, flow control needs to
2890          * be configured according to it
2891          */
2892         switch (an_info & I40E_LINK_PAUSE_RXTX) {
2893         case I40E_LINK_PAUSE_RXTX:
2894                 rxfc = 1;
2895                 txfc = 1;
2896                 hw->fc.current_mode = I40E_FC_FULL;
2897                 break;
2898         case I40E_AQ_LINK_PAUSE_RX:
2899                 rxfc = 1;
2900                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2901                 break;
2902         case I40E_AQ_LINK_PAUSE_TX:
2903                 txfc = 1;
2904                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2905                 break;
2906         default:
2907                 hw->fc.current_mode = I40E_FC_NONE;
2908                 break;
2909         }
2910
2911 write_reg:
2912         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2913                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2914         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2915         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2916         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2917         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2918
2919         return ret;
2920 }
2921
/* PF setup */
/**
 * One-time PF initialization: reset the statistics baseline, read the
 * switch configuration from the firmware, create the main VSI, program
 * the filter control (ethertype + macvlan filters, 128-entry hash LUT)
 * and align flow control with the link auto-negotiation result.
 *
 * @return I40E_SUCCESS, or an error from switch-config read / VSI setup
 */
static int
i40e_pf_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_filter_control_settings settings;
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	struct i40e_vsi *vsi;
	int ret;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));

	/* Read back the switch topology reported by the firmware */
	ret = i40e_pf_get_switch_config(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
		return ret;
	}

	/* VSI setup */
	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
		return I40E_ERR_NOT_READY;
	}
	pf->main_vsi = vsi;
	/* Expose the main VSI's queue count to the ethdev layer */
	dev_data->nb_rx_queues = vsi->nb_qps;
	dev_data->nb_tx_queues = vsi->nb_qps;

	/* Configure filter control */
	memset(&settings, 0, sizeof(settings));
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
	/* Enable ethtype and macvlan filters */
	settings.enable_ethtype = TRUE;
	settings.enable_macvlan = TRUE;
	ret = i40e_set_filter_control(hw, &settings);
	/* Filter-control failure is non-fatal; warn and continue */
	if (ret)
		PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
								ret);

	/* Update flow control according to the auto negotiation */
	i40e_update_flow_control(hw);

	return I40E_SUCCESS;
}
2969
/**
 * Enable or disable a TX queue.
 *
 * Follows the hardware-required sequence: set/clear the pre-config
 * disable flag, wait until the enable-request and enable-status bits
 * of QTX_ENA agree, then flip the request bit and poll until the
 * status bit follows or the poll times out.
 *
 * @param hw     hardware handle
 * @param q_idx  absolute TX queue index
 * @param on     TRUE to enable the queue, FALSE to disable it
 * @return I40E_SUCCESS, or I40E_ERR_TIMEOUT if the queue never reached
 *         the requested state
 */
int
i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/**
	 * Set or clear TX Queue Disable flags,
	 * which is required by hardware.
	 */
	i40e_pre_tx_queue_cfg(hw, q_idx, on);
	rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);

	/* Wait until the request is finished */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		/* Done when QENA_REQ and QENA_STAT carry the same value */
		if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
			((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
							& 0x1))) {
			break;
		}
	}
	if (on) {
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			return I40E_SUCCESS; /* already on, skip next steps */

		/* Reset the queue head pointer before enabling */
		I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
			return I40E_SUCCESS; /* already off, skip next steps */
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}
	/* Write the register */
	I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
	/* Check the result */
	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
		if (on) {
			if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		} else {
			if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
				!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
				break;
		}
	}
	/* Check if it is timeout */
	if (j >= I40E_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return I40E_ERR_TIMEOUT;
	}

	return I40E_SUCCESS;
}
3029
3030 /* Swith on or off the tx queues */
3031 static int
3032 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3033 {
3034         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3035         struct i40e_tx_queue *txq;
3036         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3037         uint16_t i;
3038         int ret;
3039
3040         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3041                 txq = dev_data->tx_queues[i];
3042                 /* Don't operate the queue if not configured or
3043                  * if starting only per queue */
3044                 if (!txq->q_set || (on && txq->tx_deferred_start))
3045                         continue;
3046                 if (on)
3047                         ret = i40e_dev_tx_queue_start(dev, i);
3048                 else
3049                         ret = i40e_dev_tx_queue_stop(dev, i);
3050                 if ( ret != I40E_SUCCESS)
3051                         return ret;
3052         }
3053
3054         return I40E_SUCCESS;
3055 }
3056
3057 int
3058 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3059 {
3060         uint32_t reg;
3061         uint16_t j;
3062
3063         /* Wait until the request is finished */
3064         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3065                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3066                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3067                 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3068                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3069                         break;
3070         }
3071
3072         if (on) {
3073                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3074                         return I40E_SUCCESS; /* Already on, skip next steps */
3075                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3076         } else {
3077                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3078                         return I40E_SUCCESS; /* Already off, skip next steps */
3079                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3080         }
3081
3082         /* Write the register */
3083         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3084         /* Check the result */
3085         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3086                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3087                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3088                 if (on) {
3089                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3090                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3091                                 break;
3092                 } else {
3093                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3094                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3095                                 break;
3096                 }
3097         }
3098
3099         /* Check if it is timeout */
3100         if (j >= I40E_CHK_Q_ENA_COUNT) {
3101                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3102                             (on ? "enable" : "disable"), q_idx);
3103                 return I40E_ERR_TIMEOUT;
3104         }
3105
3106         return I40E_SUCCESS;
3107 }
3108 /* Switch on or off the rx queues */
3109 static int
3110 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3111 {
3112         struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3113         struct i40e_rx_queue *rxq;
3114         struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3115         uint16_t i;
3116         int ret;
3117
3118         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3119                 rxq = dev_data->rx_queues[i];
3120                 /* Don't operate the queue if not configured or
3121                  * if starting only per queue */
3122                 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3123                         continue;
3124                 if (on)
3125                         ret = i40e_dev_rx_queue_start(dev, i);
3126                 else
3127                         ret = i40e_dev_rx_queue_stop(dev, i);
3128                 if (ret != I40E_SUCCESS)
3129                         return ret;
3130         }
3131
3132         return I40E_SUCCESS;
3133 }
3134
3135 /* Switch on or off all the rx/tx queues */
3136 int
3137 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3138 {
3139         int ret;
3140
3141         if (on) {
3142                 /* enable rx queues before enabling tx queues */
3143                 ret = i40e_vsi_switch_rx_queues(vsi, on);
3144                 if (ret) {
3145                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3146                         return ret;
3147                 }
3148                 ret = i40e_vsi_switch_tx_queues(vsi, on);
3149         } else {
3150                 /* Stop tx queues before stopping rx queues */
3151                 ret = i40e_vsi_switch_tx_queues(vsi, on);
3152                 if (ret) {
3153                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3154                         return ret;
3155                 }
3156                 ret = i40e_vsi_switch_rx_queues(vsi, on);
3157         }
3158
3159         return ret;
3160 }
3161
3162 /* Initialize VSI for TX */
3163 static int
3164 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3165 {
3166         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3167         struct rte_eth_dev_data *data = pf->dev_data;
3168         uint16_t i;
3169         uint32_t ret = I40E_SUCCESS;
3170
3171         for (i = 0; i < data->nb_tx_queues; i++) {
3172                 ret = i40e_tx_queue_init(data->tx_queues[i]);
3173                 if (ret != I40E_SUCCESS)
3174                         break;
3175         }
3176
3177         return ret;
3178 }
3179
3180 /* Initialize VSI for RX */
3181 static int
3182 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3183 {
3184         struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3185         struct rte_eth_dev_data *data = pf->dev_data;
3186         int ret = I40E_SUCCESS;
3187         uint16_t i;
3188
3189         i40e_pf_config_mq_rx(pf);
3190         for (i = 0; i < data->nb_rx_queues; i++) {
3191                 ret = i40e_rx_queue_init(data->rx_queues[i]);
3192                 if (ret != I40E_SUCCESS) {
3193                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3194                                     "initialization");
3195                         break;
3196                 }
3197         }
3198
3199         return ret;
3200 }
3201
3202 /* Initialize VSI */
3203 static int
3204 i40e_vsi_init(struct i40e_vsi *vsi)
3205 {
3206         int err;
3207
3208         err = i40e_vsi_tx_init(vsi);
3209         if (err) {
3210                 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3211                 return err;
3212         }
3213         err = i40e_vsi_rx_init(vsi);
3214         if (err) {
3215                 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3216                 return err;
3217         }
3218
3219         return err;
3220 }
3221
3222 static void
3223 i40e_stat_update_32(struct i40e_hw *hw,
3224                    uint32_t reg,
3225                    bool offset_loaded,
3226                    uint64_t *offset,
3227                    uint64_t *stat)
3228 {
3229         uint64_t new_data;
3230
3231         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3232         if (!offset_loaded)
3233                 *offset = new_data;
3234
3235         if (new_data >= *offset)
3236                 *stat = (uint64_t)(new_data - *offset);
3237         else
3238                 *stat = (uint64_t)((new_data +
3239                         ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3240 }
3241
3242 static void
3243 i40e_stat_update_48(struct i40e_hw *hw,
3244                    uint32_t hireg,
3245                    uint32_t loreg,
3246                    bool offset_loaded,
3247                    uint64_t *offset,
3248                    uint64_t *stat)
3249 {
3250         uint64_t new_data;
3251
3252         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3253         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3254                         I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3255
3256         if (!offset_loaded)
3257                 *offset = new_data;
3258
3259         if (new_data >= *offset)
3260                 *stat = new_data - *offset;
3261         else
3262                 *stat = (uint64_t)((new_data +
3263                         ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3264
3265         *stat &= I40E_48_BIT_MASK;
3266 }
3267
/* Disable IRQ0 */
void
i40e_pf_disable_irq0(struct i40e_hw *hw)
{
	/* Disable all interrupt types */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
	/* Flush posted writes so the disable takes effect immediately */
	I40E_WRITE_FLUSH(hw);
}
3276
/* Enable IRQ0 */
void
i40e_pf_enable_irq0(struct i40e_hw *hw)
{
	/* Enable the interrupt, clear any pending PBA bit and select
	 * the no-ITR index so the interrupt is not rate limited */
	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
		I40E_PFINT_DYN_CTL0_INTENA_MASK |
		I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
		I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
	/* Flush posted writes so the enable takes effect immediately */
	I40E_WRITE_FLUSH(hw);
}
3287
/* Configure which causes are reported through IRQ0 (the "other" /
 * miscellaneous PF interrupt): error events, link changes, VF resets
 * and AdminQ messages. No data queues are attached to IRQ0. */
static void
i40e_pf_config_irq0(struct i40e_hw *hw)
{
	uint32_t enable;

	/* read pending request and disable first */
	i40e_pf_disable_irq0(hw);
	/**
	 * Enable all interrupt error options to detect possible errors,
	 * other informative int are ignored
	 */
	enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
		 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
		 I40E_PFINT_ICR0_ENA_GRST_MASK |
		 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
		 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
		 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
		 I40E_PFINT_ICR0_ENA_VFLR_MASK |
		 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
	/* Route the "other" causes to the OTHER ITR index */
	I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
		I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);

	/* Link no queues with irq0 */
	I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
		I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
}
3316
3317 static void
3318 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3319 {
3320         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3321         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3322         int i;
3323         uint16_t abs_vf_id;
3324         uint32_t index, offset, val;
3325
3326         if (!pf->vfs)
3327                 return;
3328         /**
3329          * Try to find which VF trigger a reset, use absolute VF id to access
3330          * since the reg is global register.
3331          */
3332         for (i = 0; i < pf->vf_num; i++) {
3333                 abs_vf_id = hw->func_caps.vf_base_id + i;
3334                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3335                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3336                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3337                 /* VFR event occured */
3338                 if (val & (0x1 << offset)) {
3339                         int ret;
3340
3341                         /* Clear the event first */
3342                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3343                                                         (0x1 << offset));
3344                         PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3345                         /**
3346                          * Only notify a VF reset event occured,
3347                          * don't trigger another SW reset
3348                          */
3349                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3350                         if (ret != I40E_SUCCESS)
3351                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3352                 }
3353         }
3354 }
3355
3356 static void
3357 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3358 {
3359         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3360         struct i40e_arq_event_info info;
3361         uint16_t pending, opcode;
3362         int ret;
3363
3364         info.buf_len = I40E_AQ_BUF_SZ;
3365         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3366         if (!info.msg_buf) {
3367                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3368                 return;
3369         }
3370
3371         pending = 1;
3372         while (pending) {
3373                 ret = i40e_clean_arq_element(hw, &info, &pending);
3374
3375                 if (ret != I40E_SUCCESS) {
3376                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3377                                     "aq_err: %u", hw->aq.asq_last_status);
3378                         break;
3379                 }
3380                 opcode = rte_le_to_cpu_16(info.desc.opcode);
3381
3382                 switch (opcode) {
3383                 case i40e_aqc_opc_send_msg_to_pf:
3384                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
3385                         i40e_pf_host_handle_vf_msg(dev,
3386                                         rte_le_to_cpu_16(info.desc.retval),
3387                                         rte_le_to_cpu_32(info.desc.cookie_high),
3388                                         rte_le_to_cpu_32(info.desc.cookie_low),
3389                                         info.msg_buf,
3390                                         info.msg_len);
3391                         break;
3392                 default:
3393                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3394                                     opcode);
3395                         break;
3396                 }
3397         }
3398         rte_free(info.msg_buf);
3399 }
3400
/**
 * Interrupt handler triggered by NIC  for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			   void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, enable;

	/* Mask IRQ0 while the causes are being processed */
	i40e_pf_disable_irq0(hw);

	/* Reading ICR0 also acknowledges the reported causes */
	cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
	enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);

	/* Shared IRQ case, return */
	if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
		PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
			    "no INT event to process", hw->pf_id);
		goto done;
	}

	if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
		PMD_DRV_LOG(INFO, "INT:Link status changed");
		i40e_dev_link_update(dev, 0);
	}

	/* The error causes below are only logged, not recovered from */
	if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
		PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");

	if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
		PMD_DRV_LOG(INFO, "INT:Malicious programming detected");

	if (cause & I40E_PFINT_ICR0_GRST_MASK)
		PMD_DRV_LOG(INFO, "INT:Global Resets Requested");

	if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
		PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured");

	if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
		PMD_DRV_LOG(INFO, "INT:HMC error occured");

	/* Add processing func to deal with VF reset vent */
	if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
		PMD_DRV_LOG(INFO, "INT:VF reset detected");
		i40e_dev_handle_vfr_event(dev);
	}
	/* Find admin queue event */
	if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
		PMD_DRV_LOG(INFO, "INT:ADMINQ event");
		i40e_dev_handle_aq_msg(dev);
	}

done:
	/* Restore the cause-enable mask read at entry */
	I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
	/* Re-enable interrupt from device side */
	i40e_pf_enable_irq0(hw);
	/* Re-enable interrupt from host side */
	rte_intr_enable(&(dev->pci_dev->intr_handle));
}
3471
3472 static int
3473 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3474                          struct i40e_macvlan_filter *filter,
3475                          int total)
3476 {
3477         int ele_num, ele_buff_size;
3478         int num, actual_num, i;
3479         int ret = I40E_SUCCESS;
3480         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3481         struct i40e_aqc_add_macvlan_element_data *req_list;
3482
3483         if (filter == NULL  || total == 0)
3484                 return I40E_ERR_PARAM;
3485         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3486         ele_buff_size = hw->aq.asq_buf_size;
3487
3488         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3489         if (req_list == NULL) {
3490                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3491                 return I40E_ERR_NO_MEMORY;
3492         }
3493
3494         num = 0;
3495         do {
3496                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3497                 memset(req_list, 0, ele_buff_size);
3498
3499                 for (i = 0; i < actual_num; i++) {
3500                         (void)rte_memcpy(req_list[i].mac_addr,
3501                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
3502                         req_list[i].vlan_tag =
3503                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
3504                         req_list[i].flags = rte_cpu_to_le_16(\
3505                                 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3506                         req_list[i].queue_number = 0;
3507                 }
3508
3509                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3510                                                 actual_num, NULL);
3511                 if (ret != I40E_SUCCESS) {
3512                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3513                         goto DONE;
3514                 }
3515                 num += actual_num;
3516         } while (num < total);
3517
3518 DONE:
3519         rte_free(req_list);
3520         return ret;
3521 }
3522
3523 static int
3524 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3525                             struct i40e_macvlan_filter *filter,
3526                             int total)
3527 {
3528         int ele_num, ele_buff_size;
3529         int num, actual_num, i;
3530         int ret = I40E_SUCCESS;
3531         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3532         struct i40e_aqc_remove_macvlan_element_data *req_list;
3533
3534         if (filter == NULL  || total == 0)
3535                 return I40E_ERR_PARAM;
3536
3537         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3538         ele_buff_size = hw->aq.asq_buf_size;
3539
3540         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3541         if (req_list == NULL) {
3542                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3543                 return I40E_ERR_NO_MEMORY;
3544         }
3545
3546         num = 0;
3547         do {
3548                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3549                 memset(req_list, 0, ele_buff_size);
3550
3551                 for (i = 0; i < actual_num; i++) {
3552                         (void)rte_memcpy(req_list[i].mac_addr,
3553                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
3554                         req_list[i].vlan_tag =
3555                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
3556                         req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3557                 }
3558
3559                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3560                                                 actual_num, NULL);
3561                 if (ret != I40E_SUCCESS) {
3562                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
3563                         goto DONE;
3564                 }
3565                 num += actual_num;
3566         } while (num < total);
3567
3568 DONE:
3569         rte_free(req_list);
3570         return ret;
3571 }
3572
3573 /* Find out specific MAC filter */
3574 static struct i40e_mac_filter *
3575 i40e_find_mac_filter(struct i40e_vsi *vsi,
3576                          struct ether_addr *macaddr)
3577 {
3578         struct i40e_mac_filter *f;
3579
3580         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3581                 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3582                         return f;
3583         }
3584
3585         return NULL;
3586 }
3587
3588 static bool
3589 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3590                          uint16_t vlan_id)
3591 {
3592         uint32_t vid_idx, vid_bit;
3593
3594         vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3595         vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3596
3597         if (vsi->vfta[vid_idx] & vid_bit)
3598                 return 1;
3599         else
3600                 return 0;
3601 }
3602
3603 static void
3604 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3605                          uint16_t vlan_id, bool on)
3606 {
3607         uint32_t vid_idx, vid_bit;
3608
3609 #define UINT32_BIT_MASK      0x1F
3610 #define VALID_VLAN_BIT_MASK  0xFFF
3611         /* VFTA is 32-bits size array, each element contains 32 vlan bits, Find the
3612          *  element first, then find the bits it belongs to
3613          */
3614         vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3615                   sizeof(uint32_t));
3616         vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3617
3618         if (on)
3619                 vsi->vfta[vid_idx] |= vid_bit;
3620         else
3621                 vsi->vfta[vid_idx] &= ~vid_bit;
3622 }
3623
3624 /**
3625  * Find all vlan options for specific mac addr,
3626  * return with actual vlan found.
3627  */
3628 static inline int
3629 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3630                            struct i40e_macvlan_filter *mv_f,
3631                            int num, struct ether_addr *addr)
3632 {
3633         int i;
3634         uint32_t j, k;
3635
3636         /**
3637          * Not to use i40e_find_vlan_filter to decrease the loop time,
3638          * although the code looks complex.
3639           */
3640         if (num < vsi->vlan_num)
3641                 return I40E_ERR_PARAM;
3642
3643         i = 0;
3644         for (j = 0; j < I40E_VFTA_SIZE; j++) {
3645                 if (vsi->vfta[j]) {
3646                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3647                                 if (vsi->vfta[j] & (1 << k)) {
3648                                         if (i > num - 1) {
3649                                                 PMD_DRV_LOG(ERR, "vlan number "
3650                                                             "not match");
3651                                                 return I40E_ERR_PARAM;
3652                                         }
3653                                         (void)rte_memcpy(&mv_f[i].macaddr,
3654                                                         addr, ETH_ADDR_LEN);
3655                                         mv_f[i].vlan_id =
3656                                                 j * I40E_UINT32_BIT_SIZE + k;
3657                                         i++;
3658                                 }
3659                         }
3660                 }
3661         }
3662         return I40E_SUCCESS;
3663 }
3664
3665 static inline int
3666 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3667                            struct i40e_macvlan_filter *mv_f,
3668                            int num,
3669                            uint16_t vlan)
3670 {
3671         int i = 0;
3672         struct i40e_mac_filter *f;
3673
3674         if (num < vsi->mac_num)
3675                 return I40E_ERR_PARAM;
3676
3677         TAILQ_FOREACH(f, &vsi->mac_list, next) {
3678                 if (i > num - 1) {
3679                         PMD_DRV_LOG(ERR, "buffer number not match");
3680                         return I40E_ERR_PARAM;
3681                 }
3682                 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3683                 mv_f[i].vlan_id = vlan;
3684                 i++;
3685         }
3686
3687         return I40E_SUCCESS;
3688 }
3689
3690 static int
3691 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3692 {
3693         int i, num;
3694         struct i40e_mac_filter *f;
3695         struct i40e_macvlan_filter *mv_f;
3696         int ret = I40E_SUCCESS;
3697
3698         if (vsi == NULL || vsi->mac_num == 0)
3699                 return I40E_ERR_PARAM;
3700
3701         /* Case that no vlan is set */
3702         if (vsi->vlan_num == 0)
3703                 num = vsi->mac_num;
3704         else
3705                 num = vsi->mac_num * vsi->vlan_num;
3706
3707         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3708         if (mv_f == NULL) {
3709                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3710                 return I40E_ERR_NO_MEMORY;
3711         }
3712
3713         i = 0;
3714         if (vsi->vlan_num == 0) {
3715                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3716                         (void)rte_memcpy(&mv_f[i].macaddr,
3717                                 &f->macaddr, ETH_ADDR_LEN);
3718                         mv_f[i].vlan_id = 0;
3719                         i++;
3720                 }
3721         } else {
3722                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3723                         ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3724                                         vsi->vlan_num, &f->macaddr);
3725                         if (ret != I40E_SUCCESS)
3726                                 goto DONE;
3727                         i += vsi->vlan_num;
3728                 }
3729         }
3730
3731         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3732 DONE:
3733         rte_free(mv_f);
3734
3735         return ret;
3736 }
3737
3738 int
3739 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3740 {
3741         struct i40e_macvlan_filter *mv_f;
3742         int mac_num;
3743         int ret = I40E_SUCCESS;
3744
3745         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3746                 return I40E_ERR_PARAM;
3747
3748         /* If it's already set, just return */
3749         if (i40e_find_vlan_filter(vsi,vlan))
3750                 return I40E_SUCCESS;
3751
3752         mac_num = vsi->mac_num;
3753
3754         if (mac_num == 0) {
3755                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3756                 return I40E_ERR_PARAM;
3757         }
3758
3759         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3760
3761         if (mv_f == NULL) {
3762                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3763                 return I40E_ERR_NO_MEMORY;
3764         }
3765
3766         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3767
3768         if (ret != I40E_SUCCESS)
3769                 goto DONE;
3770
3771         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3772
3773         if (ret != I40E_SUCCESS)
3774                 goto DONE;
3775
3776         i40e_set_vlan_filter(vsi, vlan, 1);
3777
3778         vsi->vlan_num++;
3779         ret = I40E_SUCCESS;
3780 DONE:
3781         rte_free(mv_f);
3782         return ret;
3783 }
3784
/**
 * Remove a vlan id from the VSI: delete the (mac, vlan) hardware filter
 * for every configured mac address, then clear the vlan from the
 * software bitmap.  When the last vlan is removed, every mac is
 * re-added with vlan 0 so untagged traffic keeps matching.
 *
 * @param vsi   VSI to operate on.
 * @param vlan  Vlan id to remove; vlan 0 is the implicit untagged
 *              filter and cannot be removed.
 *
 * @return I40E_SUCCESS on success, I40E_ERR_PARAM / I40E_ERR_NO_MEMORY
 *         or an AdminQ error code on failure.
 */
int
i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If can't find it, just return */
	if (!i40e_find_vlan_filter(vsi, vlan))
		return I40E_ERR_PARAM;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	/* One filter entry per configured mac address */
	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	/* This is last vlan to remove, replace all mac filter with vlan 0 */
	if (vsi->vlan_num == 1) {
		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
		if (ret != I40E_SUCCESS)
			goto DONE;

		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	/* Hardware updated; clear the vlan from software state */
	i40e_set_vlan_filter(vsi, vlan, 0);

	vsi->vlan_num--;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
3846
/* Add a MAC address filter to a VSI.
 *
 * Pairs @addr with every VLAN currently enabled on the VSI (enabling
 * VLAN 0 first if none is), programs the resulting MACVLAN filters
 * into hardware, then appends the address to the VSI's software
 * mac_list. Adding an address that is already configured is a no-op
 * returning I40E_SUCCESS.
 */
int
i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int vlan_num;
	int ret = I40E_SUCCESS;

	/* If it's add and we've config it, return */
	f = i40e_find_mac_filter(vsi, addr);
	if (f != NULL)
		return I40E_SUCCESS;

	/**
	 * If vlan_num is 0, that's the first time to add mac,
	 * set mask for vlan_id 0.
	 */
	if (vsi->vlan_num == 0) {
		i40e_set_vlan_filter(vsi, 0, 1);
		vsi->vlan_num = 1;
	}

	vlan_num = vsi->vlan_num;

	/* One (mac, vlan) scratch entry per enabled VLAN */
	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Add the mac addr into mac list */
	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
	if (f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = I40E_ERR_NO_MEMORY;
		goto DONE;
	}
	(void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);

	return ret;
}
3902
/* Remove a MAC address filter from a VSI.
 *
 * Deletes the MACVLAN filters pairing @addr with every VLAN enabled on
 * the VSI from hardware, then unlinks and frees the address entry from
 * the software mac_list. Returns I40E_ERR_PARAM if the address is not
 * configured or the VSI has no VLAN enabled.
 */
int
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int vlan_num;
	int ret = I40E_SUCCESS;

	/* Can't find it, return an error */
	f = i40e_find_mac_filter(vsi, addr);
	if (f == NULL)
		return I40E_ERR_PARAM;

	vlan_num = vsi->vlan_num;
	if (vlan_num == 0) {
		PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
		return I40E_ERR_PARAM;
	}
	/* One (mac, vlan) scratch entry per enabled VLAN */
	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Remove the mac addr into mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
3945
3946 /* Configure hash enable flags for RSS */
3947 uint64_t
3948 i40e_config_hena(uint64_t flags)
3949 {
3950         uint64_t hena = 0;
3951
3952         if (!flags)
3953                 return hena;
3954
3955         if (flags & ETH_RSS_NONF_IPV4_UDP)
3956                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3957         if (flags & ETH_RSS_NONF_IPV4_TCP)
3958                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3959         if (flags & ETH_RSS_NONF_IPV4_SCTP)
3960                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3961         if (flags & ETH_RSS_NONF_IPV4_OTHER)
3962                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3963         if (flags & ETH_RSS_FRAG_IPV4)
3964                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3965         if (flags & ETH_RSS_NONF_IPV6_UDP)
3966                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3967         if (flags & ETH_RSS_NONF_IPV6_TCP)
3968                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3969         if (flags & ETH_RSS_NONF_IPV6_SCTP)
3970                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3971         if (flags & ETH_RSS_NONF_IPV6_OTHER)
3972                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3973         if (flags & ETH_RSS_FRAG_IPV6)
3974                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3975         if (flags & ETH_RSS_L2_PAYLOAD)
3976                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3977
3978         return hena;
3979 }
3980
3981 /* Parse the hash enable flags */
3982 uint64_t
3983 i40e_parse_hena(uint64_t flags)
3984 {
3985         uint64_t rss_hf = 0;
3986
3987         if (!flags)
3988                 return rss_hf;
3989
3990         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3991                 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3992         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3993                 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3994         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3995                 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3996         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3997                 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3998         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3999                 rss_hf |= ETH_RSS_FRAG_IPV4;
4000         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4001                 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
4002         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4003                 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
4004         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4005                 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
4006         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4007                 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
4008         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4009                 rss_hf |= ETH_RSS_FRAG_IPV6;
4010         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4011                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4012
4013         return rss_hf;
4014 }
4015
/* Disable RSS by clearing every supported hash-enable bit on the PF. */
static void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena;

	/* The 64-bit hash-enable value is split across two 32-bit regs */
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	/* Clear only the bits the driver manages, preserve the rest */
	hena &= ~I40E_RSS_HENA_ALL;
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
}
4030
/* Program the RSS hash key and hash-enable bits into hardware.
 *
 * The key is written only when rss_conf supplies one of at least
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes; a missing
 * or short key leaves the current hardware key untouched. The
 * hash-enable bits are always rewritten from rss_conf->rss_hf.
 * Always returns 0.
 */
static int
i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint32_t *hash_key;
	uint8_t hash_key_len;
	uint64_t rss_hf;
	uint16_t i;
	uint64_t hena;

	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	if (hash_key != NULL && hash_key_len >=
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Fill in RSS hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
	}

	rss_hf = rss_conf->rss_hf;
	/* Read-modify-write: preserve bits outside the managed RSS set */
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);

	return 0;
}
4060
4061 static int
4062 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4063                          struct rte_eth_rss_conf *rss_conf)
4064 {
4065         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4066         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4067         uint64_t hena;
4068
4069         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4070         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4071         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4072                 if (rss_hf != 0) /* Enable RSS */
4073                         return -EINVAL;
4074                 return 0; /* Nothing to do */
4075         }
4076         /* RSS enabled */
4077         if (rss_hf == 0) /* Disable RSS */
4078                 return -EINVAL;
4079
4080         return i40e_hw_rss_hash_set(hw, rss_conf);
4081 }
4082
4083 static int
4084 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4085                            struct rte_eth_rss_conf *rss_conf)
4086 {
4087         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4088         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4089         uint64_t hena;
4090         uint16_t i;
4091
4092         if (hash_key != NULL) {
4093                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4094                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4095                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4096         }
4097         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4098         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4099         rss_conf->rss_hf = i40e_parse_hena(hena);
4100
4101         return 0;
4102 }
4103
4104 static int
4105 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4106 {
4107         switch (filter_type) {
4108         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4109                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4110                 break;
4111         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4112                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4113                 break;
4114         case RTE_TUNNEL_FILTER_IMAC_TENID:
4115                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4116                 break;
4117         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4118                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4119                 break;
4120         case ETH_TUNNEL_FILTER_IMAC:
4121                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4122                 break;
4123         default:
4124                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4125                 return -EINVAL;
4126         }
4127
4128         return 0;
4129 }
4130
/* Add or remove a tunnel (cloud) filter on the main VSI.
 *
 * Builds an AQ cloud-filter element from @tunnel_filter and issues
 * either an add (@add != 0) or remove command. Only VXLAN tunnels are
 * supported; other tunnel types and unknown filter types are rejected
 * with -EINVAL. Returns the admin-queue status otherwise.
 */
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			struct rte_eth_tunnel_filter_conf *tunnel_filter,
			uint8_t add)
{
	uint16_t ip_type;
	uint8_t tun_type = 0;
	int val, ret = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data  *pfilter;

	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
		0);

	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -EINVAL;
	}
	pfilter = cld_filter;

	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
			sizeof(struct ether_addr));
	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
			sizeof(struct ether_addr));

	pfilter->inner_vlan = tunnel_filter->inner_vlan;
	/* Copy the tunnel endpoint address in the matching v4/v6 layout */
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
		break;
	default:
		/* Other tunnel types is not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	/* Translate the ethdev filter type into AQ cloud-filter flags */
	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
						&pfilter->flags);
	if (val < 0) {
		rte_free(cld_filter);
		return -EINVAL;
	}

	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	pfilter->tenant_id = tunnel_filter->tenant_id;
	pfilter->queue_number = tunnel_filter->queue_id;

	if (add)
		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						cld_filter, 1);

	rte_free(cld_filter);
	return ret;
}
4205
4206 static int
4207 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4208 {
4209         uint8_t i;
4210
4211         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4212                 if (pf->vxlan_ports[i] == port)
4213                         return i;
4214         }
4215
4216         return -1;
4217 }
4218
4219 static int
4220 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4221 {
4222         int  idx, ret;
4223         uint8_t filter_idx;
4224         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4225
4226         idx = i40e_get_vxlan_port_idx(pf, port);
4227
4228         /* Check if port already exists */
4229         if (idx >= 0) {
4230                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4231                 return -EINVAL;
4232         }
4233
4234         /* Now check if there is space to add the new port */
4235         idx = i40e_get_vxlan_port_idx(pf, 0);
4236         if (idx < 0) {
4237                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
4238                         "not adding port %d", port);
4239                 return -ENOSPC;
4240         }
4241
4242         ret =  i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4243                                         &filter_idx, NULL);
4244         if (ret < 0) {
4245                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4246                 return -1;
4247         }
4248
4249         PMD_DRV_LOG(INFO, "Added %s port %d with AQ command with index %d",
4250                          port,  filter_index);
4251
4252         /* New port: add it and mark its index in the bitmap */
4253         pf->vxlan_ports[idx] = port;
4254         pf->vxlan_bitmap |= (1 << idx);
4255
4256         if (!(pf->flags & I40E_FLAG_VXLAN))
4257                 pf->flags |= I40E_FLAG_VXLAN;
4258
4259         return 0;
4260 }
4261
/* Remove a VXLAN UDP destination port from the hardware offload table.
 *
 * Returns -EINVAL when VXLAN was never configured or the port is not
 * in the table, -1 on admin-queue failure, 0 on success. Clears the
 * PF's VXLAN flag once the last offloaded port is gone.
 */
static int
i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
	int idx;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	if (!(pf->flags & I40E_FLAG_VXLAN)) {
		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
		return -EINVAL;
	}

	idx = i40e_get_vxlan_port_idx(pf, port);

	if (idx < 0) {
		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
		return -EINVAL;
	}

	/* NOTE(review): the software table slot 'idx' is passed to the
	 * firmware as the tunnel filter index; this assumes it matches
	 * the filter_idx the AQ returned at add time -- TODO confirm. */
	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
		return -1;
	}

	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
			port, idx);

	/* Free the slot; drop the VXLAN flag when no port remains */
	pf->vxlan_ports[idx] = 0;
	pf->vxlan_bitmap &= ~(1 << idx);

	if (!pf->vxlan_bitmap)
		pf->flags &= ~I40E_FLAG_VXLAN;

	return 0;
}
4296
4297 /* Add UDP tunneling port */
4298 static int
4299 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4300                         struct rte_eth_udp_tunnel *udp_tunnel)
4301 {
4302         int ret = 0;
4303         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4304
4305         if (udp_tunnel == NULL)
4306                 return -EINVAL;
4307
4308         switch (udp_tunnel->prot_type) {
4309         case RTE_TUNNEL_TYPE_VXLAN:
4310                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
4311                 break;
4312
4313         case RTE_TUNNEL_TYPE_GENEVE:
4314         case RTE_TUNNEL_TYPE_TEREDO:
4315                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4316                 ret = -1;
4317                 break;
4318
4319         default:
4320                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4321                 ret = -1;
4322                 break;
4323         }
4324
4325         return ret;
4326 }
4327
4328 /* Remove UDP tunneling port */
4329 static int
4330 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
4331                         struct rte_eth_udp_tunnel *udp_tunnel)
4332 {
4333         int ret = 0;
4334         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4335
4336         if (udp_tunnel == NULL)
4337                 return -EINVAL;
4338
4339         switch (udp_tunnel->prot_type) {
4340         case RTE_TUNNEL_TYPE_VXLAN:
4341                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
4342                 break;
4343         case RTE_TUNNEL_TYPE_GENEVE:
4344         case RTE_TUNNEL_TYPE_TEREDO:
4345                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4346                 ret = -1;
4347                 break;
4348         default:
4349                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4350                 ret = -1;
4351                 break;
4352         }
4353
4354         return ret;
4355 }
4356
/* Configure RSS on the PF: fill the redirection table, then program
 * the hash key and enable bits from the device RSS configuration.
 * If no RSS offload flag is requested, RSS is disabled instead.
 */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	/* Spread over a power-of-two queue count (per helper name,
	 * the largest power of two <= nb_rx_queues -- TODO confirm) */
	uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);

	/* Fill the LUT: queue indices are packed one byte at a time and
	 * flushed to a HLUT register every 4th entry */
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40e_pf_disable_rss(pf);
		return 0;
	}
	/* No usable key supplied: generate a random default. NOTE(review):
	 * rss_key_default is a file-scope array defined outside this
	 * chunk; it is refilled on every reconfiguration. */
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	return i40e_hw_rss_hash_set(hw, &rss_conf);
}
4392
4393 static int
4394 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
4395                         struct rte_eth_tunnel_filter_conf *filter)
4396 {
4397         if (pf == NULL || filter == NULL) {
4398                 PMD_DRV_LOG(ERR, "Invalid parameter");
4399                 return -EINVAL;
4400         }
4401
4402         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
4403                 PMD_DRV_LOG(ERR, "Invalid queue ID");
4404                 return -EINVAL;
4405         }
4406
4407         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
4408                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
4409                 return -EINVAL;
4410         }
4411
4412         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
4413                 (is_zero_ether_addr(filter->outer_mac))) {
4414                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
4415                 return -EINVAL;
4416         }
4417
4418         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
4419                 (is_zero_ether_addr(filter->inner_mac))) {
4420                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
4421                 return -EINVAL;
4422         }
4423
4424         return 0;
4425 }
4426
4427 static int
4428 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4429                         void *arg)
4430 {
4431         struct rte_eth_tunnel_filter_conf *filter;
4432         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4433         int ret = I40E_SUCCESS;
4434
4435         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
4436
4437         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
4438                 return I40E_ERR_PARAM;
4439
4440         switch (filter_op) {
4441         case RTE_ETH_FILTER_NOP:
4442                 if (!(pf->flags & I40E_FLAG_VXLAN))
4443                         ret = I40E_NOT_SUPPORTED;
4444         case RTE_ETH_FILTER_ADD:
4445                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
4446                 break;
4447         case RTE_ETH_FILTER_DELETE:
4448                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
4449                 break;
4450         default:
4451                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4452                 ret = I40E_ERR_PARAM;
4453                 break;
4454         }
4455
4456         return ret;
4457 }
4458
4459 static int
4460 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4461 {
4462         if (!pf->dev_data->sriov.active) {
4463                 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
4464                 case ETH_MQ_RX_RSS:
4465                         i40e_pf_config_rss(pf);
4466                         break;
4467                 default:
4468                         i40e_pf_disable_rss(pf);
4469                         break;
4470                 }
4471         }
4472
4473         return 0;
4474 }
4475
4476 static int
4477 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
4478                      enum rte_filter_type filter_type,
4479                      enum rte_filter_op filter_op,
4480                      void *arg)
4481 {
4482         int ret = 0;
4483
4484         if (dev == NULL)
4485                 return -EINVAL;
4486
4487         switch (filter_type) {
4488         case RTE_ETH_FILTER_TUNNEL:
4489                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
4490                 break;
4491         default:
4492                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4493                                                         filter_type);
4494                 ret = -EINVAL;
4495                 break;
4496         }
4497
4498         return ret;
4499 }