i40e: move to drivers/net/
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"

/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
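/*
 * Note: these are the "other" (non-queue) interrupt causes delivered on
 * IRQ0; they are enabled in i40e_pf_config_irq0() and serviced in
 * i40e_dev_interrupt_handler().
 */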

#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))

static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			  struct ether_addr *mac_addr,
			  uint32_t index,
			  uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
			       uint32_t hireg,
			       uint32_t loreg,
			       bool offset_loaded,
			       uint64_t *offset,
			       uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
				uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
						struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
				struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
			struct rte_eth_ethertype_filter *filter,
			bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);

static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure                = i40e_dev_configure,
	.dev_start                    = i40e_dev_start,
	.dev_stop                     = i40e_dev_stop,
	.dev_close                    = i40e_dev_close,
	.promiscuous_enable           = i40e_dev_promiscuous_enable,
	.promiscuous_disable          = i40e_dev_promiscuous_disable,
	.allmulticast_enable          = i40e_dev_allmulticast_enable,
	.allmulticast_disable         = i40e_dev_allmulticast_disable,
	.dev_set_link_up              = i40e_dev_set_link_up,
	.dev_set_link_down            = i40e_dev_set_link_down,
	.link_update                  = i40e_dev_link_update,
	.stats_get                    = i40e_dev_stats_get,
	.stats_reset                  = i40e_dev_stats_reset,
	.queue_stats_mapping_set      = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get                = i40e_dev_info_get,
	.vlan_filter_set              = i40e_vlan_filter_set,
	.vlan_tpid_set                = i40e_vlan_tpid_set,
	.vlan_offload_set             = i40e_vlan_offload_set,
	.vlan_strip_queue_set         = i40e_vlan_strip_queue_set,
	.vlan_pvid_set                = i40e_vlan_pvid_set,
	.rx_queue_start               = i40e_dev_rx_queue_start,
	.rx_queue_stop                = i40e_dev_rx_queue_stop,
	.tx_queue_start               = i40e_dev_tx_queue_start,
	.tx_queue_stop                = i40e_dev_tx_queue_stop,
	.rx_queue_setup               = i40e_dev_rx_queue_setup,
	.rx_queue_release             = i40e_dev_rx_queue_release,
	.rx_queue_count               = i40e_dev_rx_queue_count,
	.rx_descriptor_done           = i40e_dev_rx_descriptor_done,
	.tx_queue_setup               = i40e_dev_tx_queue_setup,
	.tx_queue_release             = i40e_dev_tx_queue_release,
	.dev_led_on                   = i40e_dev_led_on,
	.dev_led_off                  = i40e_dev_led_off,
	.flow_ctrl_set                = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set       = i40e_priority_flow_ctrl_set,
	.mac_addr_add                 = i40e_macaddr_add,
	.mac_addr_remove              = i40e_macaddr_remove,
	.reta_update                  = i40e_dev_rss_reta_update,
	.reta_query                   = i40e_dev_rss_reta_query,
	.rss_hash_update              = i40e_dev_rss_hash_update,
	.rss_hash_conf_get            = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_add               = i40e_dev_udp_tunnel_add,
	.udp_tunnel_del               = i40e_dev_udp_tunnel_del,
	.filter_ctrl                  = i40e_dev_filter_ctrl,
};

static struct eth_driver rte_i40e_pmd = {
	{
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};

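/*
 * Return the largest power of two that is not greater than n,
 * e.g. 48 -> 32 and 64 -> 64. __builtin_clz() counts the leading zero
 * bits of a non-zero int, so the shift below isolates the highest set bit.
 */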
static inline int
i40e_align_floor(int n)
{
	if (n == 0)
		return 0;
	return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
}
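/*
 * Atomically read/write the link status. struct rte_eth_link fits into
 * 64 bits, so a single rte_atomic64_cmpset() yields a consistent snapshot
 * (or update) even while the interrupt handler changes the link state
 * concurrently; -1 is returned if the compare-and-set loses the race.
 */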
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}

static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);

/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif

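/*
 * The writes below program the flexible-payload extraction tables
 * (GLQF_ORT/GLQF_PIT) that flow director relies on; the constants mirror
 * the values the NVM is expected to provide once the issue noted above
 * is fixed.
 */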
static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

	/* GLQF_PIT Registers */
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}

static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* For secondary processes, we don't initialise any further, as the
	 * primary process has already done this work. Only check whether we
	 * need a different RX function. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
		return 0;
	}
	pci_dev = dev->pci_dev;
	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(hw);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around the NVM issue, initialize registers
	 * for flexible payload in software.
	 * This should be removed once the issue is fixed in NVM.
	 */
	i40e_flex_payload_reg_init(hw);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	/* Disable LLDP */
	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/*
	 * On X710, performance numbers are far below expectation on recent
	 * firmware versions, and the fix may not be integrated in the next
	 * firmware release. A software workaround is therefore needed: it
	 * modifies the initial values of 3 internal-only registers. Note
	 * that the workaround can be removed once this is fixed in firmware.
	 */
	i40e_configure_registers(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
					"for storing MAC addresses");
		ret = -ENOMEM;
		goto err_mac_alloc;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
					&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
		i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	return 0;

err_mac_alloc:
	i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}

static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int ret;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/* VMDQ setup.
	 *  VMDQ setup needs to be moved out of i40e_pf_config_mq_rx(), as
	 *  VMDQ and RSS settings have different requirements.
	 *  The general PMD call sequence is NIC init, configure,
	 *  rx/tx_queue_setup and dev_start. rx/tx_queue_setup() will look up
	 *  the VSI that a specific queue belongs to when VMDQ is applicable,
	 *  so VMDQ setup has to be done before rx/tx_queue_setup(), and this
	 *  function is a good place for it.
	 *  RSS setup needs the actual number of configured RX queues, which
	 *  is only available after rx_queue_setup(), so dev_start() is a
	 *  good place for RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}
	return 0;
err:
	i40e_fdir_teardown(pf);
	return ret;
}

void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), 0);
	} else {
		uint32_t reg;
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}
	I40E_WRITE_FLUSH(hw);
}
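/*
 * Convert an ITR interval in microseconds to the hardware register count;
 * the register granularity is 2 us per unit, so e.g. 32 us is written as 16.
 * Out-of-range values fall back to the default interval.
 */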
static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return interval / 2;
}
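/*
 * Bind all of the VSI's RX queues to its MSI-X vector. The queues are
 * chained into a linked list via the NEXTQ_INDX field of each QINT_RQCTL
 * register, and the head queue is written to the (PF/VP)INT_LNKLSTN
 * register of the vector.
 */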
void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	int i;

	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			I40E_QINT_RQCTL_ITR_INDX_MASK |
			((vsi->base_queue + i + 1) <<
			I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
						(vsi->base_queue <<
				I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
						msix_vect - 1), interval);

#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
		/* Disable auto-mask on enabling of all non-zero interrupts */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf includes IRQ0, so subtract it */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
					I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
				(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
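/*
 * Kick the vector's dynamic control register: INTENA enables the interrupt,
 * CLEARPBA clears any pending bit, and ITR index 0 is programmed with the
 * configured throttling interval.
 */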
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(
			RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
					I40E_PFINT_DYN_CTLN_INTENA_MASK |
					I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
				(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
			(interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}

static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}

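/*
 * Translate an ethdev link speed value into the i40e AQ link speed
 * encoding. Speeds not listed below return I40E_LINK_SPEED_UNKNOWN.
 */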
static inline uint8_t
i40e_parse_link_speed(uint16_t eth_link_speed)
{
	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

	switch (eth_link_speed) {
	case ETH_LINK_SPEED_40G:
		link_speed = I40E_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_20G:
		link_speed = I40E_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_10G:
		link_speed = I40E_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_1000:
		link_speed = I40E_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_100:
		link_speed = I40E_LINK_SPEED_100MB;
		break;
	}

	return link_speed;
}

static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;
	int ret = -ENOTSUP;

	/* Skip it on 40G interfaces, as a workaround for the link issue */
	if (i40e_is_40G_device(hw->device_id))
		return I40E_SUCCESS;

	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
		return ret;

	memset(&phy_conf, 0, sizeof(phy_conf));

	/* bits 0-2 use the values from get_phy_abilities_resp */
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update abilities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = force_speed;

	phy_conf.abilities = abilities;

	/* use get_phy_abilities_resp value for the rest */
	phy_conf.phy_type = phy_ab.phy_type;
	phy_conf.eee_capability = phy_ab.eee_capability;
	phy_conf.eeer = phy_ab.eeer_val;
	phy_conf.low_power_ctrl = phy_ab.d3_lpan;

	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
		    phy_ab.abilities, phy_ab.link_speed);
	PMD_DRV_LOG(DEBUG, "\tConfig:  abilities %x, link_speed %x",
		    phy_conf.abilities, phy_conf.link_speed);

	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
	if (status)
		return ret;

	return I40E_SUCCESS;
}

static int
i40e_apply_link_speed(struct rte_eth_dev *dev)
{
	uint8_t speed;
	uint8_t abilities = 0;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	speed = i40e_parse_link_speed(conf->link_speed);
	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
		abilities |= I40E_AQ_PHY_AN_ENABLED;
	else
		abilities |= I40E_AQ_PHY_LINK_ENABLED;

	return i40e_phy_conf_link(hw, abilities, speed);
}

static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int ret, i;

	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	/* Initialize VSI */
	ret = i40e_dev_rxtx_init(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(main_vsi);
	i40e_vsi_enable_queues_intr(main_vsi);

	/* Map VMDQ VSI queues with MSIX interrupt */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
	}

	/* enable FDIR MSIX interrupt */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
	}

	/* Enable all queues which have been configured */
	ret = i40e_dev_switch_queues(pf, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
						true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
	}

	/* Apply link configuration */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Failed to apply link settings");
		goto err_up;
	}

	return I40E_SUCCESS;

err_up:
	i40e_dev_switch_queues(pf, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}

static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int i;

	/* Disable all queues */
	i40e_dev_switch_queues(pf, FALSE);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(main_vsi);
	i40e_vsi_queues_unbind_intr(main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
	}

	/* disable and un-map FDIR MSIX interrupt, matching dev_start */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
	}
	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* Set link down */
	i40e_dev_set_link_down(dev);
}

static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_fdir_teardown(pf);
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}

static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							true, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							false, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							false, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
				vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}

/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	/* re-apply link speed setting */
	return i40e_apply_link_speed(dev);
}

/*
 * Set device link down.
 */
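/*
 * Bringing the link down is done by writing a PHY config with no forced
 * speed and without the I40E_AQ_PHY_LINK_ENABLED ability bit set.
 */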
static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
	uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return i40e_phy_conf_link(hw, abilities, speed);
}
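/*
 * Poll the firmware for link status, for up to CHECK_INTERVAL *
 * MAX_REPEAT_TIME ms when wait_to_complete is set. Returns 0 if the link
 * status changed and -1 if it did not.
 */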
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned rep_cnt = MAX_REPEAT_TIME;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
		if (status != I40E_SUCCESS) {
			link.link_speed = ETH_LINK_SPEED_100;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (!wait_to_complete)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (!link.link_status && rep_cnt--);

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}

/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}

/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	uint32_t i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
1348                             pf->offset_loaded, &os->rx_size_1023,
1349                             &ns->rx_size_1023);
1350         i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1351                             I40E_GLPRT_PRC1522L(hw->port),
1352                             pf->offset_loaded, &os->rx_size_1522,
1353                             &ns->rx_size_1522);
1354         i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1355                             I40E_GLPRT_PRC9522L(hw->port),
1356                             pf->offset_loaded, &os->rx_size_big,
1357                             &ns->rx_size_big);
1358         i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1359                             pf->offset_loaded, &os->rx_undersize,
1360                             &ns->rx_undersize);
1361         i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1362                             pf->offset_loaded, &os->rx_fragments,
1363                             &ns->rx_fragments);
1364         i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1365                             pf->offset_loaded, &os->rx_oversize,
1366                             &ns->rx_oversize);
1367         i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1368                             pf->offset_loaded, &os->rx_jabber,
1369                             &ns->rx_jabber);
1370         i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1371                             I40E_GLPRT_PTC64L(hw->port),
1372                             pf->offset_loaded, &os->tx_size_64,
1373                             &ns->tx_size_64);
1374         i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1375                             I40E_GLPRT_PTC127L(hw->port),
1376                             pf->offset_loaded, &os->tx_size_127,
1377                             &ns->tx_size_127);
1378         i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1379                             I40E_GLPRT_PTC255L(hw->port),
1380                             pf->offset_loaded, &os->tx_size_255,
1381                             &ns->tx_size_255);
1382         i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1383                             I40E_GLPRT_PTC511L(hw->port),
1384                             pf->offset_loaded, &os->tx_size_511,
1385                             &ns->tx_size_511);
1386         i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1387                             I40E_GLPRT_PTC1023L(hw->port),
1388                             pf->offset_loaded, &os->tx_size_1023,
1389                             &ns->tx_size_1023);
1390         i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1391                             I40E_GLPRT_PTC1522L(hw->port),
1392                             pf->offset_loaded, &os->tx_size_1522,
1393                             &ns->tx_size_1522);
1394         i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1395                             I40E_GLPRT_PTC9522L(hw->port),
1396                             pf->offset_loaded, &os->tx_size_big,
1397                             &ns->tx_size_big);
1398         i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
1399                            pf->offset_loaded,
1400                            &os->fd_sb_match, &ns->fd_sb_match);
1401         /* GLPRT_MSPDC not supported */
1402         /* GLPRT_XEC not supported */
1403
1404         pf->offset_loaded = true;
1405
1406         if (pf->main_vsi)
1407                 i40e_update_vsi_stats(pf->main_vsi);
1408
1409         stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1410                                                 ns->eth.rx_broadcast;
1411         stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1412                                                 ns->eth.tx_broadcast;
1413         stats->ibytes   = ns->eth.rx_bytes;
1414         stats->obytes   = ns->eth.tx_bytes;
1415         stats->oerrors  = ns->eth.tx_errors;
1416         stats->imcasts  = ns->eth.rx_multicast;
1417         stats->fdirmatch = ns->fd_sb_match;
1418
1419         /* Rx Errors */
1420         stats->ibadcrc  = ns->crc_errors;
1421         stats->ibadlen  = ns->rx_length_errors + ns->rx_undersize +
1422                         ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1423         stats->imissed  = ns->eth.rx_discards;
1424         stats->ierrors  = stats->ibadcrc + stats->ibadlen + stats->imissed;
1425
1426         PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1427         PMD_DRV_LOG(DEBUG, "rx_bytes:            %"PRIu64"", ns->eth.rx_bytes);
1428         PMD_DRV_LOG(DEBUG, "rx_unicast:          %"PRIu64"", ns->eth.rx_unicast);
1429         PMD_DRV_LOG(DEBUG, "rx_multicast:        %"PRIu64"", ns->eth.rx_multicast);
1430         PMD_DRV_LOG(DEBUG, "rx_broadcast:        %"PRIu64"", ns->eth.rx_broadcast);
1431         PMD_DRV_LOG(DEBUG, "rx_discards:         %"PRIu64"", ns->eth.rx_discards);
1432         PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1433                     ns->eth.rx_unknown_protocol);
1434         PMD_DRV_LOG(DEBUG, "tx_bytes:            %"PRIu64"", ns->eth.tx_bytes);
1435         PMD_DRV_LOG(DEBUG, "tx_unicast:          %"PRIu64"", ns->eth.tx_unicast);
1436         PMD_DRV_LOG(DEBUG, "tx_multicast:        %"PRIu64"", ns->eth.tx_multicast);
1437         PMD_DRV_LOG(DEBUG, "tx_broadcast:        %"PRIu64"", ns->eth.tx_broadcast);
1438         PMD_DRV_LOG(DEBUG, "tx_discards:         %"PRIu64"", ns->eth.tx_discards);
1439         PMD_DRV_LOG(DEBUG, "tx_errors:           %"PRIu64"", ns->eth.tx_errors);
1440
1441         PMD_DRV_LOG(DEBUG, "tx_dropped_link_down:     %"PRIu64"",
1442                     ns->tx_dropped_link_down);
1443         PMD_DRV_LOG(DEBUG, "crc_errors:               %"PRIu64"", ns->crc_errors);
1444         PMD_DRV_LOG(DEBUG, "illegal_bytes:            %"PRIu64"",
1445                     ns->illegal_bytes);
1446         PMD_DRV_LOG(DEBUG, "error_bytes:              %"PRIu64"", ns->error_bytes);
1447         PMD_DRV_LOG(DEBUG, "mac_local_faults:         %"PRIu64"",
1448                     ns->mac_local_faults);
1449         PMD_DRV_LOG(DEBUG, "mac_remote_faults:        %"PRIu64"",
1450                     ns->mac_remote_faults);
1451         PMD_DRV_LOG(DEBUG, "rx_length_errors:         %"PRIu64"",
1452                     ns->rx_length_errors);
1453         PMD_DRV_LOG(DEBUG, "link_xon_rx:              %"PRIu64"", ns->link_xon_rx);
1454         PMD_DRV_LOG(DEBUG, "link_xoff_rx:             %"PRIu64"", ns->link_xoff_rx);
1455         for (i = 0; i < 8; i++) {
1456                 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]:      %"PRIu64"",
1457                                 i, ns->priority_xon_rx[i]);
1458                 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]:     %"PRIu64"",
1459                                 i, ns->priority_xoff_rx[i]);
1460         }
1461         PMD_DRV_LOG(DEBUG, "link_xon_tx:              %"PRIu64"", ns->link_xon_tx);
1462         PMD_DRV_LOG(DEBUG, "link_xoff_tx:             %"PRIu64"", ns->link_xoff_tx);
1463         for (i = 0; i < 8; i++) {
1464                 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]:      %"PRIu64"",
1465                                 i, ns->priority_xon_tx[i]);
1466                 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]:     %"PRIu64"",
1467                                 i, ns->priority_xoff_tx[i]);
1468                 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]:  %"PRIu64"",
1469                                 i, ns->priority_xon_2_xoff[i]);
1470         }
1471         PMD_DRV_LOG(DEBUG, "rx_size_64:               %"PRIu64"", ns->rx_size_64);
1472         PMD_DRV_LOG(DEBUG, "rx_size_127:              %"PRIu64"", ns->rx_size_127);
1473         PMD_DRV_LOG(DEBUG, "rx_size_255:              %"PRIu64"", ns->rx_size_255);
1474         PMD_DRV_LOG(DEBUG, "rx_size_511:              %"PRIu64"", ns->rx_size_511);
1475         PMD_DRV_LOG(DEBUG, "rx_size_1023:             %"PRIu64"", ns->rx_size_1023);
1476         PMD_DRV_LOG(DEBUG, "rx_size_1522:             %"PRIu64"", ns->rx_size_1522);
1477         PMD_DRV_LOG(DEBUG, "rx_size_big:              %"PRIu64"", ns->rx_size_big);
1478         PMD_DRV_LOG(DEBUG, "rx_undersize:             %"PRIu64"", ns->rx_undersize);
1479         PMD_DRV_LOG(DEBUG, "rx_fragments:             %"PRIu64"", ns->rx_fragments);
1480         PMD_DRV_LOG(DEBUG, "rx_oversize:              %"PRIu64"", ns->rx_oversize);
1481         PMD_DRV_LOG(DEBUG, "rx_jabber:                %"PRIu64"", ns->rx_jabber);
1482         PMD_DRV_LOG(DEBUG, "tx_size_64:               %"PRIu64"", ns->tx_size_64);
1483         PMD_DRV_LOG(DEBUG, "tx_size_127:              %"PRIu64"", ns->tx_size_127);
1484         PMD_DRV_LOG(DEBUG, "tx_size_255:              %"PRIu64"", ns->tx_size_255);
1485         PMD_DRV_LOG(DEBUG, "tx_size_511:              %"PRIu64"", ns->tx_size_511);
1486         PMD_DRV_LOG(DEBUG, "tx_size_1023:             %"PRIu64"", ns->tx_size_1023);
1487         PMD_DRV_LOG(DEBUG, "tx_size_1522:             %"PRIu64"", ns->tx_size_1522);
1488         PMD_DRV_LOG(DEBUG, "tx_size_big:              %"PRIu64"", ns->tx_size_big);
1489         PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
1490                         ns->mac_short_packet_dropped);
1491         PMD_DRV_LOG(DEBUG, "checksum_error:           %"PRIu64"",
1492                     ns->checksum_error);
1493         PMD_DRV_LOG(DEBUG, "fdir_match:               %"PRIu64"", ns->fd_sb_match);
1494         PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1495 }
1496
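/**
 * Note on the scheme used by the stats reads above: the hardware
 * counters are free-running and are never cleared by the driver. The
 * first read after a reset records each raw value as an offset, and
 * later reads report the delta against it. A minimal sketch of the
 * 48-bit variant (the actual helpers are defined earlier in this file):
 *
 *   val = rd32(lo) | ((uint64_t)(rd32(hi) & 0xFFFF) << 32);
 *   if (!offset_loaded)
 *           *offset = val;                  // first read sets baseline
 *   if (val >= *offset)
 *           *stat = val - *offset;
 *   else                                    // 48-bit counter wrapped
 *           *stat = val + (1ULL << 48) - *offset;
 *
 * This is why i40e_dev_stats_reset() below only needs to clear
 * pf->offset_loaded.
 */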
1497 /* Reset the statistics */
1498 static void
1499 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1500 {
1501         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1502
1503         /* Force each counter's start point (offset) to reload on the next read */
1504         pf->offset_loaded = false;
1505 }
1506
1507 static int
1508 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1509                                  __rte_unused uint16_t queue_id,
1510                                  __rte_unused uint8_t stat_idx,
1511                                  __rte_unused uint8_t is_rx)
1512 {
1513         PMD_INIT_FUNC_TRACE();
1514
1515         return -ENOSYS;
1516 }
1517
1518 static void
1519 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1520 {
1521         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1522         struct i40e_vsi *vsi = pf->main_vsi;
1523
1524         dev_info->max_rx_queues = vsi->nb_qps;
1525         dev_info->max_tx_queues = vsi->nb_qps;
1526         dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1527         dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1528         dev_info->max_mac_addrs = vsi->max_macaddrs;
1529         dev_info->max_vfs = dev->pci_dev->max_vfs;
1530         dev_info->rx_offload_capa =
1531                 DEV_RX_OFFLOAD_VLAN_STRIP |
1532                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1533                 DEV_RX_OFFLOAD_UDP_CKSUM |
1534                 DEV_RX_OFFLOAD_TCP_CKSUM;
1535         dev_info->tx_offload_capa =
1536                 DEV_TX_OFFLOAD_VLAN_INSERT |
1537                 DEV_TX_OFFLOAD_IPV4_CKSUM |
1538                 DEV_TX_OFFLOAD_UDP_CKSUM |
1539                 DEV_TX_OFFLOAD_TCP_CKSUM |
1540                 DEV_TX_OFFLOAD_SCTP_CKSUM |
1541                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1542                 DEV_TX_OFFLOAD_TCP_TSO;
1543         dev_info->reta_size = pf->hash_lut_size;
1544         dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
1545
1546         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1547                 .rx_thresh = {
1548                         .pthresh = I40E_DEFAULT_RX_PTHRESH,
1549                         .hthresh = I40E_DEFAULT_RX_HTHRESH,
1550                         .wthresh = I40E_DEFAULT_RX_WTHRESH,
1551                 },
1552                 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1553                 .rx_drop_en = 0,
1554         };
1555
1556         dev_info->default_txconf = (struct rte_eth_txconf) {
1557                 .tx_thresh = {
1558                         .pthresh = I40E_DEFAULT_TX_PTHRESH,
1559                         .hthresh = I40E_DEFAULT_TX_HTHRESH,
1560                         .wthresh = I40E_DEFAULT_TX_WTHRESH,
1561                 },
1562                 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1563                 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1564                 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1565                                 ETH_TXQ_FLAGS_NOOFFLOADS,
1566         };
1567
1568         if (pf->flags & I40E_FLAG_VMDQ) {
1569                 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1570                 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1571                 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1572                                                 pf->max_nb_vmdq_vsi;
1573                 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1574                 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1575                 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
1576         }
1577 }
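/**
 * Queue numbering assumed by the info reported above when VMDQ is on:
 * the main VSI's queues come first and the VMDQ pool queues are
 * appended after them. With hypothetical numbers, 4 LAN queue pairs
 * and 15 VMDQ VSIs of 4 queue pairs each give:
 *
 *   vmdq_queue_base = 4              // right after the LAN queues
 *   vmdq_queue_num  = 15 * 4 = 60
 *   max_rx_queues   = 4 + 60 = 64
 */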
1578
1579 static int
1580 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1581 {
1582         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1583         struct i40e_vsi *vsi = pf->main_vsi;
1584         PMD_INIT_FUNC_TRACE();
1585
1586         if (on)
1587                 return i40e_vsi_add_vlan(vsi, vlan_id);
1588         else
1589                 return i40e_vsi_delete_vlan(vsi, vlan_id);
1590 }
1591
1592 static void
1593 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1594                    __rte_unused uint16_t tpid)
1595 {
1596         PMD_INIT_FUNC_TRACE();
1597 }
1598
1599 static void
1600 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1601 {
1602         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1603         struct i40e_vsi *vsi = pf->main_vsi;
1604
1605         if (mask & ETH_VLAN_STRIP_MASK) {
1606                 /* Enable or disable VLAN stripping */
1607                 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1608                         i40e_vsi_config_vlan_stripping(vsi, TRUE);
1609                 else
1610                         i40e_vsi_config_vlan_stripping(vsi, FALSE);
1611         }
1612
1613         if (mask & ETH_VLAN_EXTEND_MASK) {
1614                 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1615                         i40e_vsi_config_double_vlan(vsi, TRUE);
1616                 else
1617                         i40e_vsi_config_double_vlan(vsi, FALSE);
1618         }
1619 }
1620
1621 static void
1622 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1623                           __rte_unused uint16_t queue,
1624                           __rte_unused int on)
1625 {
1626         PMD_INIT_FUNC_TRACE();
1627 }
1628
1629 static int
1630 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1631 {
1632         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1633         struct i40e_vsi *vsi = pf->main_vsi;
1634         struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1635         struct i40e_vsi_vlan_pvid_info info;
1636
1637         memset(&info, 0, sizeof(info));
1638         info.on = on;
1639         if (info.on)
1640                 info.config.pvid = pvid;
1641         else {
1642                 info.config.reject.tagged =
1643                                 data->dev_conf.txmode.hw_vlan_reject_tagged;
1644                 info.config.reject.untagged =
1645                                 data->dev_conf.txmode.hw_vlan_reject_untagged;
1646         }
1647
1648         return i40e_vsi_vlan_pvid_set(vsi, &info);
1649 }
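/*
 * Illustrative use: rte_eth_dev_set_vlan_pvid(port_id, 100, 1) lands
 * here with pvid == 100 and on == 1, forcing every packet sent from
 * the main VSI onto VLAN 100; calling it again with on == 0 restores
 * the reject-tagged/untagged behaviour taken from dev_conf.txmode.
 */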
1650
1651 static int
1652 i40e_dev_led_on(struct rte_eth_dev *dev)
1653 {
1654         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655         uint32_t mode = i40e_led_get(hw);
1656
1657         if (mode == 0)
1658                 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1659
1660         return 0;
1661 }
1662
1663 static int
1664 i40e_dev_led_off(struct rte_eth_dev *dev)
1665 {
1666         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1667         uint32_t mode = i40e_led_get(hw);
1668
1669         if (mode != 0)
1670                 i40e_led_set(hw, 0, false);
1671
1672         return 0;
1673 }
1674
1675 static int
1676 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1677                    __rte_unused struct rte_eth_fc_conf *fc_conf)
1678 {
1679         PMD_INIT_FUNC_TRACE();
1680
1681         return -ENOSYS;
1682 }
1683
1684 static int
1685 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1686                             __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1687 {
1688         PMD_INIT_FUNC_TRACE();
1689
1690         return -ENOSYS;
1691 }
1692
1693 /* Add a MAC address, and update filters */
1694 static void
1695 i40e_macaddr_add(struct rte_eth_dev *dev,
1696                  struct ether_addr *mac_addr,
1697                  __rte_unused uint32_t index,
1698                  uint32_t pool)
1699 {
1700         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1701         struct i40e_mac_filter_info mac_filter;
1702         struct i40e_vsi *vsi;
1703         int ret;
1704
1705         /* If VMDQ not enabled or configured, return */
1706         if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
1707                 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
1708                         pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
1709                         pool);
1710                 return;
1711         }
1712
1713         if (pool > pf->nb_cfg_vmdq_vsi) {
1714                 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1715                                 pool, pf->nb_cfg_vmdq_vsi);
1716                 return;
1717         }
1718
1719         (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1720         mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1721
1722         if (pool == 0)
1723                 vsi = pf->main_vsi;
1724         else
1725                 vsi = pf->vmdq[pool - 1].vsi;
1726
1727         ret = i40e_vsi_add_mac(vsi, &mac_filter);
1728         if (ret != I40E_SUCCESS) {
1729                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1730                 return;
1731         }
1732 }
1733
1734 /* Remove a MAC address, and update filters */
1735 static void
1736 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1737 {
1738         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1739         struct i40e_vsi *vsi;
1740         struct rte_eth_dev_data *data = dev->data;
1741         struct ether_addr *macaddr;
1742         int ret;
1743         uint32_t i;
1744         uint64_t pool_sel;
1745
1746         macaddr = &(data->mac_addrs[index]);
1747
1748         pool_sel = dev->data->mac_pool_sel[index];
1749
1750         for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1751                 if (pool_sel & (1ULL << i)) {
1752                         if (i == 0)
1753                                 vsi = pf->main_vsi;
1754                         else {
1755                                 /* No VMDQ pool enabled or configured */
1756                                 if (!(pf->flags & I40E_FLAG_VMDQ) ||
1757                                         (i > pf->nb_cfg_vmdq_vsi)) {
1758                                         PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1759                                                         "/configured");
1760                                         return;
1761                                 }
1762                                 vsi = pf->vmdq[i - 1].vsi;
1763                         }
1764                         ret = i40e_vsi_delete_mac(vsi, macaddr);
1765
1766                         if (ret) {
1767                                 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1768                                 return;
1769                         }
1770                 }
1771         }
1772 }
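/*
 * dev->data->mac_pool_sel[index] is a 64-bit bitmap recording which
 * pools the address at 'index' was added to (bit 0 is the main VSI,
 * bit n is VMDQ pool n), which is why the loop above walks all 64
 * bits and deletes the filter from every VSI whose bit is set.
 */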
1773
1774 /* Set perfect match or hash match of MAC and VLAN for a VF */
1775 static int
1776 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1777                  struct rte_eth_mac_filter *filter,
1778                  bool add)
1779 {
1780         struct i40e_hw *hw;
1781         struct i40e_mac_filter_info mac_filter;
1782         struct ether_addr old_mac;
1783         struct ether_addr *new_mac;
1784         struct i40e_pf_vf *vf = NULL;
1785         uint16_t vf_id;
1786         int ret;
1787
1788         if (pf == NULL) {
1789                 PMD_DRV_LOG(ERR, "Invalid PF argument.");
1790                 return -EINVAL;
1791         }
1792         hw = I40E_PF_TO_HW(pf);
1793
1794         if (filter == NULL) {
1795                 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
1796                 return -EINVAL;
1797         }
1798
1799         new_mac = &filter->mac_addr;
1800
1801         if (is_zero_ether_addr(new_mac)) {
1802                 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
1803                 return -EINVAL;
1804         }
1805
1806         vf_id = filter->dst_id;
1807
1808         if (vf_id >= pf->vf_num || !pf->vfs) { /* no underflow if vf_num == 0 */
1809                 PMD_DRV_LOG(ERR, "Invalid argument.");
1810                 return -EINVAL;
1811         }
1812         vf = &pf->vfs[vf_id];
1813
1814         if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1815                 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
1816                 return -EINVAL;
1817         }
1818
1819         if (add) {
1820                 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1821                 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1822                                 ETHER_ADDR_LEN);
1823                 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1824                                  ETHER_ADDR_LEN);
1825
1826                 mac_filter.filter_type = filter->filter_type;
1827                 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1828                 if (ret != I40E_SUCCESS) {
1829                         PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
1830                         return -1;
1831                 }
1832                 ether_addr_copy(new_mac, &pf->dev_addr);
1833         } else {
1834                 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1835                                 ETHER_ADDR_LEN);
1836                 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1837                 if (ret != I40E_SUCCESS) {
1838                         PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
1839                         return -1;
1840                 }
1841
1842                 /* Clear device address as it has been removed */
1843                 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1844                         memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1845         }
1846
1847         return 0;
1848 }
1849
1850 /* Handle a MAC filter operation coming from the filter control API */
1851 static int
1852 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1853                 void *arg)
1854 {
1855         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1856         struct rte_eth_mac_filter *filter;
1857         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1858         int ret = I40E_NOT_SUPPORTED;
1859
1860         filter = (struct rte_eth_mac_filter *)(arg);
1861
1862         switch (filter_op) {
1863         case RTE_ETH_FILTER_NOP:
1864                 ret = I40E_SUCCESS;
1865                 break;
1866         case RTE_ETH_FILTER_ADD:
1867                 i40e_pf_disable_irq0(hw);
1868                 if (filter->is_vf)
1869                         ret = i40e_vf_mac_filter_set(pf, filter, 1);
1870                 i40e_pf_enable_irq0(hw);
1871                 break;
1872         case RTE_ETH_FILTER_DELETE:
1873                 i40e_pf_disable_irq0(hw);
1874                 if (filter->is_vf)
1875                         ret = i40e_vf_mac_filter_set(pf, filter, 0);
1876                 i40e_pf_enable_irq0(hw);
1877                 break;
1878         default:
1879                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1880                 ret = I40E_ERR_PARAM;
1881                 break;
1882         }
1883
1884         return ret;
1885 }
1886
1887 static int
1888 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1889                          struct rte_eth_rss_reta_entry64 *reta_conf,
1890                          uint16_t reta_size)
1891 {
1892         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1893         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1894         uint32_t lut, l;
1895         uint16_t i, j, lut_size = pf->hash_lut_size;
1896         uint16_t idx, shift;
1897         uint8_t mask;
1898
1899         if (reta_size != lut_size ||
1900                 reta_size > ETH_RSS_RETA_SIZE_512) {
1901                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1902                         "(%d) doesn't match the number hardware can supported "
1903                                         "(%d)\n", reta_size, lut_size);
1904                 return -EINVAL;
1905         }
1906
1907         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1908                 idx = i / RTE_RETA_GROUP_SIZE;
1909                 shift = i % RTE_RETA_GROUP_SIZE;
1910                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1911                                                 I40E_4_BIT_MASK);
1912                 if (!mask)
1913                         continue;
1914                 if (mask == I40E_4_BIT_MASK)
1915                         l = 0;
1916                 else
1917                         l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1918                 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
1919                         if (mask & (0x1 << j))
1920                                 lut |= reta_conf[idx].reta[shift + j] <<
1921                                                         (CHAR_BIT * j);
1922                         else
1923                                 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
1924                 }
1925                 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1926         }
1927
1928         return 0;
1929 }
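/**
 * Layout assumed by the RETA update above and the query below: each
 * 32-bit PFQF_HLUT register packs four 8-bit LUT entries, so the loops
 * work in strides of I40E_4_BIT_WIDTH and read-modify-write a whole
 * register whenever only part of its 4-bit mask is set. For HLUT(0):
 *
 *   bits  7:0  -> reta[0]      bits 23:16 -> reta[2]
 *   bits 15:8  -> reta[1]      bits 31:24 -> reta[3]
 *
 * e.g. reta[0..3] = {1, 2, 3, 4} is written as lut = 0x04030201.
 */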
1930
1931 static int
1932 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1933                         struct rte_eth_rss_reta_entry64 *reta_conf,
1934                         uint16_t reta_size)
1935 {
1936         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1937         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1938         uint32_t lut;
1939         uint16_t i, j, lut_size = pf->hash_lut_size;
1940         uint16_t idx, shift;
1941         uint8_t mask;
1942
1943         if (reta_size != lut_size ||
1944                 reta_size > ETH_RSS_RETA_SIZE_512) {
1945                 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1946                         "(%d) doesn't match the number hardware can supported "
1947                                         "(%d)\n", reta_size, lut_size);
1948                 return -EINVAL;
1949         }
1950
1951         for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1952                 idx = i / RTE_RETA_GROUP_SIZE;
1953                 shift = i % RTE_RETA_GROUP_SIZE;
1954                 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1955                                                 I40E_4_BIT_MASK);
1956                 if (!mask)
1957                         continue;
1958
1959                 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1960                 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
1961                         if (mask & (0x1 << j))
1962                                 reta_conf[idx].reta[shift + j] = ((lut >>
1963                                         (CHAR_BIT * j)) & I40E_8_BIT_MASK);
1964                 }
1965         }
1966
1967         return 0;
1968 }
1969
1970 /**
1971  * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1972  * @hw:   pointer to the HW structure
1973  * @mem:  pointer to mem struct to fill out
1974  * @size: size of memory requested
1975  * @alignment: what to align the allocation to
1976  **/
1977 enum i40e_status_code
1978 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1979                         struct i40e_dma_mem *mem,
1980                         u64 size,
1981                         u32 alignment)
1982 {
1983         static uint64_t id = 0;
1984         const struct rte_memzone *mz = NULL;
1985         char z_name[RTE_MEMZONE_NAMESIZE];
1986
1987         if (!mem)
1988                 return I40E_ERR_PARAM;
1989
1990         id++;
1991         snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1992 #ifdef RTE_LIBRTE_XEN_DOM0
1993         mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1994                                                         RTE_PGSIZE_2M);
1995 #else
1996         mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1997 #endif
1998         if (!mz)
1999                 return I40E_ERR_NO_MEMORY;
2000
2001         mem->id = id;
2002         mem->size = size;
2003         mem->va = mz->addr;
2004 #ifdef RTE_LIBRTE_XEN_DOM0
2005         mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2006 #else
2007         mem->pa = mz->phys_addr;
2008 #endif
2009
2010         return I40E_SUCCESS;
2011 }
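/*
 * Typical use by the shared code (illustrative): allocate a DMA-able
 * region, use mem.va/mem.pa, then release it:
 *
 *   struct i40e_dma_mem mem;
 *   if (i40e_allocate_dma_mem_d(hw, &mem, size, 4096) != I40E_SUCCESS)
 *           return I40E_ERR_NO_MEMORY;
 *   ...
 *   i40e_free_dma_mem_d(hw, &mem);
 *
 * Note that the free routine below only clears the pointers; there is
 * no API to unreserve a memzone here, so the zone itself stays
 * allocated for the lifetime of the process.
 */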
2012
2013 /**
2014  * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2015  * @hw:   pointer to the HW structure
2016  * @mem:  ptr to mem struct to free
2017  **/
2018 enum i40e_status_code
2019 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2020                     struct i40e_dma_mem *mem)
2021 {
2022         if (!mem || !mem->va)
2023                 return I40E_ERR_PARAM;
2024
2025         mem->va = NULL;
2026         mem->pa = (u64)0;
2027
2028         return I40E_SUCCESS;
2029 }
2030
2031 /**
2032  * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2033  * @hw:   pointer to the HW structure
2034  * @mem:  pointer to mem struct to fill out
2035  * @size: size of memory requested
2036  **/
2037 enum i40e_status_code
2038 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2039                          struct i40e_virt_mem *mem,
2040                          u32 size)
2041 {
2042         if (!mem)
2043                 return I40E_ERR_PARAM;
2044
2045         mem->size = size;
2046         mem->va = rte_zmalloc("i40e", size, 0);
2047
2048         if (mem->va)
2049                 return I40E_SUCCESS;
2050         else
2051                 return I40E_ERR_NO_MEMORY;
2052 }
2053
2054 /**
2055  * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2056  * @hw:   pointer to the HW structure
2057  * @mem:  pointer to mem struct to free
2058  **/
2059 enum i40e_status_code
2060 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2061                      struct i40e_virt_mem *mem)
2062 {
2063         if (!mem)
2064                 return I40E_ERR_PARAM;
2065
2066         rte_free(mem->va);
2067         mem->va = NULL;
2068
2069         return I40E_SUCCESS;
2070 }
2071
2072 void
2073 i40e_init_spinlock_d(struct i40e_spinlock *sp)
2074 {
2075         rte_spinlock_init(&sp->spinlock);
2076 }
2077
2078 void
2079 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
2080 {
2081         rte_spinlock_lock(&sp->spinlock);
2082 }
2083
2084 void
2085 i40e_release_spinlock_d(struct i40e_spinlock *sp)
2086 {
2087         rte_spinlock_unlock(&sp->spinlock);
2088 }
2089
2090 void
2091 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
2092 {
2093         return;
2094 }
2095
2096 /**
2097  * Get the hardware capabilities, which will be parsed
2098  * and saved into struct i40e_hw.
2099  */
2100 static int
2101 i40e_get_cap(struct i40e_hw *hw)
2102 {
2103         struct i40e_aqc_list_capabilities_element_resp *buf;
2104         uint16_t len, size = 0;
2105         int ret;
2106
2107         /* Calculate a buffer size large enough to hold the response data */
2108         len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
2109                                                 I40E_MAX_CAP_ELE_NUM;
2110         buf = rte_zmalloc("i40e", len, 0);
2111         if (!buf) {
2112                 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2113                 return I40E_ERR_NO_MEMORY;
2114         }
2115
2116         /* Get and parse the capabilities, then save them into hw */
2117         ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
2118                         i40e_aqc_opc_list_func_capabilities, NULL);
2119         if (ret != I40E_SUCCESS)
2120                 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
2121
2122         /* Free the temporary buffer after being used */
2123         rte_free(buf);
2124
2125         return ret;
2126 }
2127
2128 static int
2129 i40e_pf_parameter_init(struct rte_eth_dev *dev)
2130 {
2131         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2132         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2133         uint16_t sum_queues = 0, sum_vsis, left_queues;
2134
2135         /* First check if FW supports SR-IOV */
2136         if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
2137                 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
2138                 return -EINVAL;
2139         }
2140
2141         pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2142         pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2143         PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2144         /* Allocate queues for pf */
2145         if (hw->func_caps.rss) {
2146                 pf->flags |= I40E_FLAG_RSS;
2147                 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2148                         (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2149                 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
2150         } else
2151                 pf->lan_nb_qps = 1;
2152         sum_queues = pf->lan_nb_qps;
2153         /* Default VSI is not counted in */
2154         sum_vsis = 0;
2155         PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2156
2157         if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2158                 pf->flags |= I40E_FLAG_SRIOV;
2159                 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2160                 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
2161                         PMD_INIT_LOG(ERR, "Config VF number %u, "
2162                                      "max supported %u.",
2163                                      dev->pci_dev->max_vfs,
2164                                      hw->func_caps.num_vfs);
2165                         return -EINVAL;
2166                 }
2167                 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
2168                         PMD_INIT_LOG(ERR, "FVL VF queue %u, "
2169                                      "max support %u queues.",
2170                                      pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2171                         return -EINVAL;
2172                 }
2173                 pf->vf_num = dev->pci_dev->max_vfs;
2174                 sum_queues += pf->vf_nb_qps * pf->vf_num;
2175                 sum_vsis   += pf->vf_num;
2176                 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2177                              pf->vf_num, pf->vf_nb_qps);
2178         } else
2179                 pf->vf_num = 0;
2180
2181         if (hw->func_caps.vmdq) {
2182                 pf->flags |= I40E_FLAG_VMDQ;
2183                 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2184                 pf->max_nb_vmdq_vsi = 1;
2185                 /*
2186                  * If VMDQ is available, assume a single VSI can be
2187                  * created; adjusted later.
2188                  */
2189                 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2190                 sum_vsis += pf->max_nb_vmdq_vsi;
2191         } else {
2192                 pf->vmdq_nb_qps = 0;
2193                 pf->max_nb_vmdq_vsi = 0;
2194         }
2195         pf->nb_cfg_vmdq_vsi = 0;
2196
2197         if (hw->func_caps.fd) {
2198                 pf->flags |= I40E_FLAG_FDIR;
2199                 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2200                 /**
2201                  * Each flow director consumes one VSI and one queue,
2202                  * but the exact usage can't be predicted here.
2203                  */
2204         }
2205
2206         if (sum_vsis > pf->max_num_vsi ||
2207                 sum_queues > hw->func_caps.num_rx_qp) {
2208                 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2209                 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2210                              pf->max_num_vsi, sum_vsis);
2211                 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2212                              hw->func_caps.num_rx_qp, sum_queues);
2213                 return -EINVAL;
2214         }
2215
2216         /* Adjust VMDQ setting to support as many VMs as possible */
2217         if (pf->flags & I40E_FLAG_VMDQ) {
2218                 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2219
2220                 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2221                                         pf->max_num_vsi - sum_vsis);
2222
2223                 /* Limit the max VMDQ number to what rte_ether can support */
2224                 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2225                                         ETH_64_POOLS - 1);
2226
2227                 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2228                                 pf->max_nb_vmdq_vsi);
2229                 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2230         }
2231
2232         /* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for the
2233          * misc interrupt cause */
2234         if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2235                 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2236                              sum_vsis, hw->func_caps.num_msix_vectors);
2237                 return -EINVAL;
2238         }
2239         return I40E_SUCCESS;
2240 }
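/*
 * Worked example of the budget above (illustrative numbers): 16 LAN
 * queue pairs, 8 VFs at 4 queue pairs each, VMDQ VSIs of 4 queue
 * pairs, on hardware exposing 128 RX queue pairs:
 *
 *   sum_queues      = 16 + 8*4 + 4 = 52  (LAN + VFs + 1 assumed VMDQ VSI)
 *   left_queues     = 128 - 52    = 76
 *   max_nb_vmdq_vsi = 1 + 76/4    = 20   (then capped by the VSI budget
 *                                         and by ETH_64_POOLS - 1)
 */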
2241
2242 static int
2243 i40e_pf_get_switch_config(struct i40e_pf *pf)
2244 {
2245         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2246         struct i40e_aqc_get_switch_config_resp *switch_config;
2247         struct i40e_aqc_switch_config_element_resp *element;
2248         uint16_t start_seid = 0, num_reported;
2249         int ret;
2250
2251         switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2252                         rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2253         if (!switch_config) {
2254                 PMD_DRV_LOG(ERR, "Failed to allocated memory");
2255                 return -ENOMEM;
2256         }
2257
2258         /* Get the switch configurations */
2259         ret = i40e_aq_get_switch_config(hw, switch_config,
2260                 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2261         if (ret != I40E_SUCCESS) {
2262                 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2263                 goto fail;
2264         }
2265         num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2266         if (num_reported != 1) { /* The number should be 1 */
2267                 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
                ret = -EINVAL; /* don't report success with an unparsed config */
2268                 goto fail;
2269         }
2270
2271         /* Parse the switch configuration elements */
2272         element = &(switch_config->element[0]);
2273         if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2274                 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2275                 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2276         } else
2277                 PMD_DRV_LOG(INFO, "Unknown element type");
2278
2279 fail:
2280         rte_free(switch_config);
2281
2282         return ret;
2283 }
2284
2285 static int
2286 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2287                         uint32_t num)
2288 {
2289         struct pool_entry *entry;
2290
2291         if (pool == NULL || num == 0)
2292                 return -EINVAL;
2293
2294         entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2295         if (entry == NULL) {
2296                 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2297                 return -ENOMEM;
2298         }
2299
2300         /* Initialize the queue heap */
2301         pool->num_free = num;
2302         pool->num_alloc = 0;
2303         pool->base = base;
2304         LIST_INIT(&pool->alloc_list);
2305         LIST_INIT(&pool->free_list);
2306
2307         /* Initialize the single free entry covering the whole range */
2308         entry->base = 0;
2309         entry->len = num;
2310
2311         LIST_INSERT_HEAD(&pool->free_list, entry, next);
2312         return 0;
2313 }
2314
2315 static void
2316 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2317 {
2318         struct pool_entry *entry;
2319
2320         if (pool == NULL)
2321                 return;
2322
2323         LIST_FOREACH(entry, &pool->alloc_list, next) {
2324                 LIST_REMOVE(entry, next);
2325                 rte_free(entry);
2326         }
2327
2328         LIST_FOREACH(entry, &pool->free_list, next) {
2329                 LIST_REMOVE(entry, next);
2330                 rte_free(entry);
2331         }
2332
2333         pool->num_free = 0;
2334         pool->num_alloc = 0;
2335         pool->base = 0;
2336         LIST_INIT(&pool->alloc_list);
2337         LIST_INIT(&pool->free_list);
2338 }
2339
2340 static int
2341 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2342                        uint32_t base)
2343 {
2344         struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2345         uint32_t pool_offset;
2346         int insert;
2347
2348         if (pool == NULL) {
2349                 PMD_DRV_LOG(ERR, "Invalid parameter");
2350                 return -EINVAL;
2351         }
2352
2353         pool_offset = base - pool->base;
2354         /* Look up the alloc list */
2355         LIST_FOREACH(entry, &pool->alloc_list, next) {
2356                 if (entry->base == pool_offset) {
2357                         valid_entry = entry;
2358                         LIST_REMOVE(entry, next);
2359                         break;
2360                 }
2361         }
2362
2363         /* Not found, return */
2364         if (valid_entry == NULL) {
2365                 PMD_DRV_LOG(ERR, "Failed to find entry");
2366                 return -EINVAL;
2367         }
2368
2369         /**
2370          * Found it; move it to the free list and try to merge.
2371          * To make merging easier, the free list is always sorted by base.
2372          * Find the adjacent prev and next entries.
2373          */
2374         prev = next = NULL;
2375         LIST_FOREACH(entry, &pool->free_list, next) {
2376                 if (entry->base > valid_entry->base) {
2377                         next = entry;
2378                         break;
2379                 }
2380                 prev = entry;
2381         }
2382
2383         insert = 0;
2384         /* Try to merge with the next one */
2385         if (next != NULL) {
2386                 /* Merge with next one */
2387                 if (valid_entry->base + valid_entry->len == next->base) {
2388                         next->base = valid_entry->base;
2389                         next->len += valid_entry->len;
2390                         rte_free(valid_entry);
2391                         valid_entry = next;
2392                         insert = 1;
2393                 }
2394         }
2395
2396         if (prev != NULL) {
2397                 /* Merge with previous one */
2398                 if (prev->base + prev->len == valid_entry->base) {
2399                         prev->len += valid_entry->len;
2400                         /* If it also merged with the next one, remove that node */
2401                         if (insert == 1) {
2402                                 LIST_REMOVE(valid_entry, next);
2403                                 rte_free(valid_entry);
2404                         } else {
2405                                 rte_free(valid_entry);
2406                                 insert = 1;
2407                         }
2408                 }
2409         }
2410
2411         /* No adjacent entry to merge with, insert it */
2412         if (insert == 0) {
2413                 if (prev != NULL)
2414                         LIST_INSERT_AFTER(prev, valid_entry, next);
2415                 else if (next != NULL)
2416                         LIST_INSERT_BEFORE(next, valid_entry, next);
2417                 else /* It's empty list, insert to head */
2418                         LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2419         }
2420
2421         pool->num_free += valid_entry->len;
2422         pool->num_alloc -= valid_entry->len;
2423
2424         return 0;
2425 }
2426
2427 static int
2428 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2429                        uint16_t num)
2430 {
2431         struct pool_entry *entry, *valid_entry;
2432
2433         if (pool == NULL || num == 0) {
2434                 PMD_DRV_LOG(ERR, "Invalid parameter");
2435                 return -EINVAL;
2436         }
2437
2438         if (pool->num_free < num) {
2439                 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2440                             num, pool->num_free);
2441                 return -ENOMEM;
2442         }
2443
2444         valid_entry = NULL;
2445         /* Look up the free list and find the best-fit entry */
2446         LIST_FOREACH(entry, &pool->free_list, next) {
2447                 if (entry->len >= num) {
2448                         /* Exact fit, can't do better */
2449                         if (entry->len == num) {
2450                                 valid_entry = entry;
2451                                 break;
2452                         }
2453                         if (valid_entry == NULL || valid_entry->len > entry->len)
2454                                 valid_entry = entry;
2455                 }
2456         }
2457
2458         /* No entry can satisfy the request, return */
2459         if (valid_entry == NULL) {
2460                 PMD_DRV_LOG(ERR, "No valid entry found");
2461                 return -ENOMEM;
2462         }
2463         /**
2464          * The entry has exactly the requested number of queues;
2465          * remove it from the free list.
2466          */
2467         if (valid_entry->len == num) {
2468                 LIST_REMOVE(valid_entry, next);
2469         } else {
2470                 /**
2471                  * The entry has more queues than requested; create a new
2472                  * entry for the alloc list and shrink the base and length
2473                  * of the remaining free-list entry accordingly.
2474                  */
2475                 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2476                 if (entry == NULL) {
2477                         PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2478                                     "resource pool");
2479                         return -ENOMEM;
2480                 }
2481                 entry->base = valid_entry->base;
2482                 entry->len = num;
2483                 valid_entry->base += num;
2484                 valid_entry->len -= num;
2485                 valid_entry = entry;
2486         }
2487
2488         /* Insert it into alloc list, not sorted */
2489         LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2490
2491         pool->num_free -= valid_entry->len;
2492         pool->num_alloc += valid_entry->len;
2493
2494         return (valid_entry->base + pool->base);
2495 }
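/*
 * Typical life cycle of the resource pool above (illustrative):
 *
 *   struct i40e_res_pool_info qp_pool;
 *   i40e_res_pool_init(&qp_pool, 0, 128);          // queues 0..127 free
 *   int base = i40e_res_pool_alloc(&qp_pool, 16);  // best fit, returns 0
 *   ...
 *   i40e_res_pool_free(&qp_pool, base);   // merged back into free list
 *   i40e_res_pool_destroy(&qp_pool);
 *
 * i40e_res_pool_alloc() returns the absolute base (pool->base plus the
 * entry base) or a negative errno, so callers must check for < 0.
 */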
2496
2497 /**
2498  * bitmap_is_subset - Check whether src2 is a subset of src1
2499  **/
2500 static inline int
2501 bitmap_is_subset(uint8_t src1, uint8_t src2)
2502 {
2503         return !((src1 ^ src2) & src2);
2504 }
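/*
 * e.g. src1 = 0x7 (TC0-TC2 supported) and src2 = 0x5 (TC0/TC2 asked):
 * (0x7 ^ 0x5) & 0x5 = 0x2 & 0x5 = 0, so src2 is a subset and the
 * check passes.
 */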
2505
2506 static int
2507 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2508 {
2509         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2510
2511         /* If DCB is not supported, only default TC is supported */
2512         if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2513                 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2514                 return -EINVAL;
2515         }
2516
2517         if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2518                 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2519                             "HW support 0x%x", hw->func_caps.enabled_tcmap,
2520                             enabled_tcmap);
2521                 return -EINVAL;
2522         }
2523         return I40E_SUCCESS;
2524 }
2525
2526 int
2527 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2528                                 struct i40e_vsi_vlan_pvid_info *info)
2529 {
2530         struct i40e_hw *hw;
2531         struct i40e_vsi_context ctxt;
2532         uint8_t vlan_flags = 0;
2533         int ret;
2534
2535         if (vsi == NULL || info == NULL) {
2536                 PMD_DRV_LOG(ERR, "invalid parameters");
2537                 return I40E_ERR_PARAM;
2538         }
2539
2540         if (info->on) {
2541                 vsi->info.pvid = info->config.pvid;
2542                 /**
2543                  * If insert pvid is enabled, only tagged pkts are
2544                  * allowed to be sent out.
2545                  */
2546                 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2547                                 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2548         } else {
2549                 vsi->info.pvid = 0;
2550                 if (info->config.reject.tagged == 0)
2551                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2552
2553                 if (info->config.reject.untagged == 0)
2554                         vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2555         }
2556         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2557                                         I40E_AQ_VSI_PVLAN_MODE_MASK);
2558         vsi->info.port_vlan_flags |= vlan_flags;
2559         vsi->info.valid_sections =
2560                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2561         memset(&ctxt, 0, sizeof(ctxt));
2562         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2563         ctxt.seid = vsi->seid;
2564
2565         hw = I40E_VSI_TO_HW(vsi);
2566         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2567         if (ret != I40E_SUCCESS)
2568                 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2569
2570         return ret;
2571 }
2572
2573 static int
2574 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2575 {
2576         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2577         int i, ret;
2578         struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2579
2580         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2581         if (ret != I40E_SUCCESS)
2582                 return ret;
2583
2584         if (!vsi->seid) {
2585                 PMD_DRV_LOG(ERR, "seid not valid");
2586                 return -EINVAL;
2587         }
2588
2589         memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2590         tc_bw_data.tc_valid_bits = enabled_tcmap;
2591         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2592                 tc_bw_data.tc_bw_credits[i] =
2593                         (enabled_tcmap & (1 << i)) ? 1 : 0;
2594
2595         ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2596         if (ret != I40E_SUCCESS) {
2597                 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2598                 return ret;
2599         }
2600
2601         (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2602                                         sizeof(vsi->info.qs_handle));
2603         return I40E_SUCCESS;
2604 }
2605
2606 static int
2607 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2608                                  struct i40e_aqc_vsi_properties_data *info,
2609                                  uint8_t enabled_tcmap)
2610 {
2611         int ret, total_tc = 0, i;
2612         uint16_t qpnum_per_tc, bsf, qp_idx;
2613
2614         ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2615         if (ret != I40E_SUCCESS)
2616                 return ret;
2617
2618         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2619                 if (enabled_tcmap & (1 << i))
2620                         total_tc++;
2621         vsi->enabled_tc = enabled_tcmap;
2622
2623         /* Number of queues per enabled TC */
2624         qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2625         qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2626         bsf = rte_bsf32(qpnum_per_tc);
2627
2628         /* Adjust nb_qps down to the number of queues actually applied */
2629         vsi->nb_qps = qpnum_per_tc * total_tc;
2630
2631         /**
2632          * Configure TC and queue mapping parameters. For each enabled
2633          * TC, allocate qpnum_per_tc queues to it; disabled TCs are
2634          * served by the default queue.
2635          */
2636         qp_idx = 0;
2637         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2638                 if (vsi->enabled_tc & (1 << i)) {
2639                         info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2640                                         I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2641                                 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2642                         qp_idx += qpnum_per_tc;
2643                 } else
2644                         info->tc_mapping[i] = 0;
2645         }
2646
2647         /* Associate queue number with VSI */
2648         if (vsi->type == I40E_VSI_SRIOV) {
2649                 info->mapping_flags |=
2650                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2651                 for (i = 0; i < vsi->nb_qps; i++)
2652                         info->queue_mapping[i] =
2653                                 rte_cpu_to_le_16(vsi->base_queue + i);
2654         } else {
2655                 info->mapping_flags |=
2656                         rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2657                 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2658         }
2659         info->valid_sections |=
2660                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2661
2662         return I40E_SUCCESS;
2663 }
2664
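/*
 * Worked example (annotation only) of the mapping computed above: assume
 * vsi->nb_qps = 16 and enabled_tcmap = 0x3. Then total_tc = 2,
 * qpnum_per_tc = 8 and bsf = rte_bsf32(8) = 3, so
 * tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                 (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)   -> queues 0-7
 * tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                 (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)   -> queues 8-15
 * The queue count is stored as a power-of-two exponent (bsf).
 */
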
2665 static int
2666 i40e_veb_release(struct i40e_veb *veb)
2667 {
2668         struct i40e_vsi *vsi;
2669         struct i40e_hw *hw;
2670
2671         if (veb == NULL || veb->associate_vsi == NULL)
2672                 return -EINVAL;
2673
2674         if (!TAILQ_EMPTY(&veb->head)) {
2675                 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2676                 return -EACCES;
2677         }
2678
2679         vsi = veb->associate_vsi;
2680         hw = I40E_VSI_TO_HW(vsi);
2681
2682         vsi->uplink_seid = veb->uplink_seid;
2683         i40e_aq_delete_element(hw, veb->seid, NULL);
2684         rte_free(veb);
2685         vsi->veb = NULL;
2686         return I40E_SUCCESS;
2687 }
2688
2689 /* Setup a veb */
2690 static struct i40e_veb *
2691 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2692 {
2693         struct i40e_veb *veb;
2694         int ret;
2695         struct i40e_hw *hw;
2696
2697         if (pf == NULL || vsi == NULL) {
2698                 PMD_DRV_LOG(ERR, "veb setup failed, "
2699                             "associated VSI shouldn't be NULL");
2700                 return NULL;
2701         }
2702         hw = I40E_PF_TO_HW(pf);
2703
2704         veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2705         if (!veb) {
2706                 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2707                 goto fail;
2708         }
2709
2710         veb->associate_vsi = vsi;
2711         TAILQ_INIT(&veb->head);
2712         veb->uplink_seid = vsi->uplink_seid;
2713
2714         ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2715                 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2716
2717         if (ret != I40E_SUCCESS) {
2718                 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2719                             hw->aq.asq_last_status);
2720                 goto fail;
2721         }
2722
2723         /* get statistics index */
2724         ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2725                                 &veb->stats_idx, NULL, NULL, NULL);
2726         if (ret != I40E_SUCCESS) {
2727                 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
2728                             hw->aq.asq_last_status);
2729                 goto fail;
2730         }
2731
2732         /* Get VEB bandwidth, to be implemented */
2733         /* Now associated vsi binding to the VEB, set uplink to this VEB */
2734         vsi->uplink_seid = veb->seid;
2735
2736         return veb;
2737 fail:
2738         rte_free(veb);
2739         return NULL;
2740 }
2741
2742 int
2743 i40e_vsi_release(struct i40e_vsi *vsi)
2744 {
2745         struct i40e_pf *pf;
2746         struct i40e_hw *hw;
2747         struct i40e_vsi_list *vsi_list;
2748         int ret;
2749         struct i40e_mac_filter *f;
2750
2751         if (!vsi)
2752                 return I40E_SUCCESS;
2753
2754         pf = I40E_VSI_TO_PF(vsi);
2755         hw = I40E_VSI_TO_HW(vsi);
2756
2757         /* VSI has children attached, release the children first */
2758         if (vsi->veb) {
2759                 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2760                         if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2761                                 return -1;
2762                         TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2763                 }
2764                 i40e_veb_release(vsi->veb);
2765         }
2766
2767         /* Remove all macvlan filters of the VSI */
2768         i40e_vsi_remove_all_macvlan_filter(vsi);
2769         TAILQ_FOREACH(f, &vsi->mac_list, next)
2770                 rte_free(f);
2771
2772         if (vsi->type != I40E_VSI_MAIN) {
2773                 /* Remove vsi from parent's sibling list */
2774                 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2775                         PMD_DRV_LOG(ERR, "VSI's parent VSI or its VEB is NULL");
2776                         return I40E_ERR_PARAM;
2777                 }
2778                 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2779                                 &vsi->sib_vsi_list, list);
2780
2781                 /* Remove the switch element of the VSI */
2782                 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2783                 if (ret != I40E_SUCCESS)
2784                         PMD_DRV_LOG(ERR, "Failed to delete element");
2785         }
2786         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2787
2788         if (vsi->type != I40E_VSI_SRIOV)
2789                 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2790         rte_free(vsi);
2791
2792         return I40E_SUCCESS;
2793 }
2794
2795 static int
2796 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2797 {
2798         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2799         struct i40e_aqc_remove_macvlan_element_data def_filter;
2800         struct i40e_mac_filter_info filter;
2801         int ret;
2802
2803         if (vsi->type != I40E_VSI_MAIN)
2804                 return I40E_ERR_CONFIG;
2805         memset(&def_filter, 0, sizeof(def_filter));
2806         (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2807                                         ETH_ADDR_LEN);
2808         def_filter.vlan_tag = 0;
2809         def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2810                                 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2811         ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2812         if (ret != I40E_SUCCESS) {
2813                 struct i40e_mac_filter *f;
2814                 struct ether_addr *mac;
2815
2816                 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2817                             "macvlan filter");
2818                 /* The permanent MAC needs to be added to the MAC list */
2819                 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2820                 if (f == NULL) {
2821                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2822                         return I40E_ERR_NO_MEMORY;
2823                 }
2824                 mac = &f->mac_info.mac_addr;
2825                 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2826                                 ETH_ADDR_LEN);
2827                 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2828                 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2829                 vsi->mac_num++;
2830
2831                 return ret;
2832         }
2833         (void)rte_memcpy(&filter.mac_addr,
2834                 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2835         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2836         return i40e_vsi_add_mac(vsi, &filter);
2837 }
2838
2839 static int
2840 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2841 {
2842         struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2843         struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2844         struct i40e_hw *hw = &vsi->adapter->hw;
2845         i40e_status ret;
2846         int i;
2847
2848         memset(&bw_config, 0, sizeof(bw_config));
2849         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2850         if (ret != I40E_SUCCESS) {
2851                 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2852                             hw->aq.asq_last_status);
2853                 return ret;
2854         }
2855
2856         memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2857         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2858                                         &ets_sla_config, NULL);
2859         if (ret != I40E_SUCCESS) {
2860                 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2861                             "configuration %u", hw->aq.asq_last_status);
2862                 return ret;
2863         }
2864
2865         /* Don't store the info yet, just print it out */
2866         PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2867         PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2868         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2869                 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2870                             ets_sla_config.share_credits[i]);
2871                 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2872                             rte_le_to_cpu_16(ets_sla_config.credits[i]));
2873                 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2874                             rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2875                             ((i % 4) * 4)); /* 4 max-credit bits per TC assumed */
2876         }
2877
2878         return 0;
2879 }
2880
2881 /* Setup a VSI */
2882 struct i40e_vsi *
2883 i40e_vsi_setup(struct i40e_pf *pf,
2884                enum i40e_vsi_type type,
2885                struct i40e_vsi *uplink_vsi,
2886                uint16_t user_param)
2887 {
2888         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2889         struct i40e_vsi *vsi;
2890         struct i40e_mac_filter_info filter;
2891         int ret;
2892         struct i40e_vsi_context ctxt;
2893         struct ether_addr broadcast =
2894                 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2895
2896         if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2897                 PMD_DRV_LOG(ERR, "VSI setup failed, "
2898                             "uplink VSI shouldn't be NULL");
2899                 return NULL;
2900         }
2901
2902         if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2903                 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI's "
2904                             "uplink VSI should be NULL");
2905                 return NULL;
2906         }
2907
2908         /* If uplink vsi didn't setup VEB, create one first */
2909         if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2910                 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2911
2912                 if (NULL == uplink_vsi->veb) {
2913                         PMD_DRV_LOG(ERR, "VEB setup failed");
2914                         return NULL;
2915                 }
2916         }
2917
2918         vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2919         if (!vsi) {
2920                 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2921                 return NULL;
2922         }
2923         TAILQ_INIT(&vsi->mac_list);
2924         vsi->type = type;
2925         vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2926         vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2927         vsi->parent_vsi = uplink_vsi;
2928         vsi->user_param = user_param;
2929         /* Allocate queues */
2930         switch (vsi->type) {
2931         case I40E_VSI_MAIN:
2932                 vsi->nb_qps = pf->lan_nb_qps;
2933                 break;
2934         case I40E_VSI_SRIOV:
2935                 vsi->nb_qps = pf->vf_nb_qps;
2936                 break;
2937         case I40E_VSI_VMDQ2:
2938                 vsi->nb_qps = pf->vmdq_nb_qps;
2939                 break;
2940         case I40E_VSI_FDIR:
2941                 vsi->nb_qps = pf->fdir_nb_qps;
2942                 break;
2943         default:
2944                 goto fail_mem;
2945         }
2946         /*
2947          * The filter status descriptor is reported in rx queue 0,
2948          * while the tx queue for fdir filter programming has no such
2949          * constraint and can be any queue.
2950          * To simplify things, the FDIR vsi uses queue pair 0.
2951          * To guarantee that queue pair 0 is available, queue allocation
2952          * must be done before this function is called.
2953          */
2954         if (type != I40E_VSI_FDIR) {
2955                 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2956                 if (ret < 0) {
2957                         PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2958                                         vsi->seid, ret);
2959                         goto fail_mem;
2960                 }
2961                 vsi->base_queue = ret;
2962         } else
2963                 vsi->base_queue = I40E_FDIR_QUEUE_ID;
2964
2965         /* VF has MSIX interrupt in VF range, don't allocate here */
2966         if (type != I40E_VSI_SRIOV) {
2967                 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2968                 if (ret < 0) {
2969                         PMD_DRV_LOG(ERR, "VSI %d MSIX allocation failed %d", vsi->seid, ret);
2970                         goto fail_queue_alloc;
2971                 }
2972                 vsi->msix_intr = ret;
2973         } else
2974                 vsi->msix_intr = 0;
2975         /* Add VSI */
2976         if (type == I40E_VSI_MAIN) {
2977                 /* For main VSI, no need to add since it's default one */
2978                 vsi->uplink_seid = pf->mac_seid;
2979                 vsi->seid = pf->main_vsi_seid;
2980                 /* Bind queues with specific MSIX interrupt */
2981                 /**
2982                  * At least 2 interrupts are needed: one for the misc cause,
2983                  * enabled from the OS side; another for the queues, bound to
2984                  * the interrupt from the device side only.
2985                  */
2986
2987                 /* Get default VSI parameters from hardware */
2988                 memset(&ctxt, 0, sizeof(ctxt));
2989                 ctxt.seid = vsi->seid;
2990                 ctxt.pf_num = hw->pf_id;
2991                 ctxt.uplink_seid = vsi->uplink_seid;
2992                 ctxt.vf_num = 0;
2993                 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2994                 if (ret != I40E_SUCCESS) {
2995                         PMD_DRV_LOG(ERR, "Failed to get VSI params");
2996                         goto fail_msix_alloc;
2997                 }
2998                 (void)rte_memcpy(&vsi->info, &ctxt.info,
2999                         sizeof(struct i40e_aqc_vsi_properties_data));
3000                 vsi->vsi_id = ctxt.vsi_number;
3001                 vsi->info.valid_sections = 0;
3002
3003                 /* Configure tc, enabled TC0 only */
3004                 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3005                         I40E_SUCCESS) {
3006                         PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3007                         goto fail_msix_alloc;
3008                 }
3009
3010                 /* TC, queue mapping */
3011                 memset(&ctxt, 0, sizeof(ctxt));
3012                 vsi->info.valid_sections |=
3013                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3014                 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3015                                         I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3016                 (void)rte_memcpy(&ctxt.info, &vsi->info,
3017                         sizeof(struct i40e_aqc_vsi_properties_data));
3018                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3019                                                 I40E_DEFAULT_TCMAP);
3020                 if (ret != I40E_SUCCESS) {
3021                         PMD_DRV_LOG(ERR, "Failed to configure "
3022                                     "TC queue mapping");
3023                         goto fail_msix_alloc;
3024                 }
3025                 ctxt.seid = vsi->seid;
3026                 ctxt.pf_num = hw->pf_id;
3027                 ctxt.uplink_seid = vsi->uplink_seid;
3028                 ctxt.vf_num = 0;
3029
3030                 /* Update VSI parameters */
3031                 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3032                 if (ret != I40E_SUCCESS) {
3033                         PMD_DRV_LOG(ERR, "Failed to update VSI params");
3034                         goto fail_msix_alloc;
3035                 }
3036
3037                 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3038                                                 sizeof(vsi->info.tc_mapping));
3039                 (void)rte_memcpy(&vsi->info.queue_mapping,
3040                                 &ctxt.info.queue_mapping,
3041                         sizeof(vsi->info.queue_mapping));
3042                 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3043                 vsi->info.valid_sections = 0;
3044
3045                 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3046                                 ETH_ADDR_LEN);
3047
3048                 /**
3049                  * Updating the default filter settings is necessary to
3050                  * prevent reception of tagged packets.
3051                  * Some old firmware configurations load a default macvlan
3052                  * filter which accepts both tagged and untagged packets.
3053                  * The update replaces it with a normal filter when needed.
3054                  * For NVM 4.2.2 or later, the update is no longer needed.
3055                  * Firmware with a correct configuration loads the default
3056                  * macvlan filter, which is expected and cannot be removed.
3057                  */
3058                 i40e_update_default_filter_setting(vsi);
3059         } else if (type == I40E_VSI_SRIOV) {
3060                 memset(&ctxt, 0, sizeof(ctxt));
3061                 /**
3062                  * For other VSI, the uplink_seid equals to uplink VSI's
3063                  * uplink_seid since they share same VEB
3064                  */
3065                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3066                 ctxt.pf_num = hw->pf_id;
3067                 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
3068                 ctxt.uplink_seid = vsi->uplink_seid;
3069                 ctxt.connection_type = 0x1;
3070                 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
3071
3072                 /**
3073                  * Do not configure the switch ID to enable VEB switching
3074                  * via I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB, because on Fortville,
3075                  * if the source MAC address of a packet sent from a VF is
3076                  * not listed in the VEB's MAC table, the VEB will switch
3077                  * the packet back to the VF. Enable it once the HW issue
3078                  * is fixed.
3079                  */
3080
3081                 /* Configure port/vlan */
3082                 ctxt.info.valid_sections |=
3083                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3084                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3085                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3086                                                 I40E_DEFAULT_TCMAP);
3087                 if (ret != I40E_SUCCESS) {
3088                         PMD_DRV_LOG(ERR, "Failed to configure "
3089                                     "TC queue mapping");
3090                         goto fail_msix_alloc;
3091                 }
3092                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3093                 ctxt.info.valid_sections |=
3094                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3095                 /**
3096                  * Since the VSI is not created yet, only configure its
3097                  * parameters; the VSI itself is added below.
3098                  */
3099         } else if (type == I40E_VSI_VMDQ2) {
3100                 memset(&ctxt, 0, sizeof(ctxt));
3101                 /*
3102                  * For other VSI, the uplink_seid equals to uplink VSI's
3103                  * uplink_seid since they share same VEB
3104                  */
3105                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3106                 ctxt.pf_num = hw->pf_id;
3107                 ctxt.vf_num = 0;
3108                 ctxt.uplink_seid = vsi->uplink_seid;
3109                 ctxt.connection_type = 0x1;
3110                 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
3111
3112                 ctxt.info.valid_sections |=
3113                                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
3114                 /* user_param carries the flag to enable loopback */
3115                 if (user_param) {
3116                         ctxt.info.switch_id =
3117                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
3118                         ctxt.info.switch_id |=
3119                         rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
3120                 }
3121
3122                 /* Configure port/vlan */
3123                 ctxt.info.valid_sections |=
3124                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3125                 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3126                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3127                                                 I40E_DEFAULT_TCMAP);
3128                 if (ret != I40E_SUCCESS) {
3129                         PMD_DRV_LOG(ERR, "Failed to configure "
3130                                         "TC queue mapping");
3131                         goto fail_msix_alloc;
3132                 }
3133                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3134                 ctxt.info.valid_sections |=
3135                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3136         } else if (type == I40E_VSI_FDIR) {
3137                 memset(&ctxt, 0, sizeof(ctxt));
3138                 vsi->uplink_seid = uplink_vsi->uplink_seid;
3139                 ctxt.pf_num = hw->pf_id;
3140                 ctxt.vf_num = 0;
3141                 ctxt.uplink_seid = vsi->uplink_seid;
3142                 ctxt.connection_type = 0x1;     /* regular data port */
3143                 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
3144                 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3145                                                 I40E_DEFAULT_TCMAP);
3146                 if (ret != I40E_SUCCESS) {
3147                         PMD_DRV_LOG(ERR, "Failed to configure "
3148                                         "TC queue mapping.");
3149                         goto fail_msix_alloc;
3150                 }
3151                 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3152                 ctxt.info.valid_sections |=
3153                         rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3154         } else {
3155                 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
3156                 goto fail_msix_alloc;
3157         }
3158
3159         if (vsi->type != I40E_VSI_MAIN) {
3160                 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
3161                 if (ret != I40E_SUCCESS) {
3162                         PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
3163                                     hw->aq.asq_last_status);
3164                         goto fail_msix_alloc;
3165                 }
3166                 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3167                 vsi->info.valid_sections = 0;
3168                 vsi->seid = ctxt.seid;
3169                 vsi->vsi_id = ctxt.vsi_number;
3170                 vsi->sib_vsi_list.vsi = vsi;
3171                 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3172                                 &vsi->sib_vsi_list, list);
3173         }
3174
3175         /* MAC/VLAN configuration */
3176         (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3177         filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3178
3179         ret = i40e_vsi_add_mac(vsi, &filter);
3180         if (ret != I40E_SUCCESS) {
3181                 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3182                 goto fail_msix_alloc;
3183         }
3184
3185         /* Get VSI BW information */
3186         i40e_vsi_dump_bw_config(vsi);
3187         return vsi;
3188 fail_msix_alloc:
3189         i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3190 fail_queue_alloc:
3191         i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3192 fail_mem:
3193         rte_free(vsi);
3194         return NULL;
3195 }
3196
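/*
 * Illustrative usage sketch (annotation, not part of the driver): creating
 * a VMDQ VSI under the main VSI, as i40e_vmdq_setup() below does. The last
 * argument is user_param, which for VMDQ carries the loopback enable flag.
 *
 *	struct i40e_vsi *vmdq_vsi;
 *
 *	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi, 1);
 *	if (vmdq_vsi == NULL)
 *		PMD_DRV_LOG(ERR, "Failed to create VMDQ VSI");
 */
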
3197 /* Configure vlan stripping on or off */
3198 int
3199 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3200 {
3201         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3202         struct i40e_vsi_context ctxt;
3203         uint8_t vlan_flags;
3204         int ret = I40E_SUCCESS;
3205
3206         /* Check if it has been already on or off */
3207         if (vsi->info.valid_sections &
3208                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3209                 if (on) {
3210                         if ((vsi->info.port_vlan_flags &
3211                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3212                                 return 0; /* already on */
3213                 } else {
3214                         if ((vsi->info.port_vlan_flags &
3215                                 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3216                                 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3217                                 return 0; /* already off */
3218                 }
3219         }
3220
3221         if (on)
3222                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3223         else
3224                 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3225         vsi->info.valid_sections =
3226                 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3227         vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3228         vsi->info.port_vlan_flags |= vlan_flags;
3229         ctxt.seid = vsi->seid;
3230         (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3231         ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3232         if (ret)
3233                 PMD_DRV_LOG(INFO, "Failed to update VSI to %s vlan stripping",
3234                             on ? "enable" : "disable");
3235
3236         return ret;
3237 }
3238
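/*
 * Annotation on the EMOD encoding implied by the checks above: stripping
 * "on" is I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH, the all-zero EMOD encoding,
 * while "off" is I40E_AQ_VSI_PVLAN_EMOD_NOTHING, in which every bit of
 * I40E_AQ_VSI_PVLAN_EMOD_MASK is set. That is why the early-return tests
 * compare the masked field against 0 and against the full mask.
 */
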
3239 static int
3240 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3241 {
3242         struct rte_eth_dev_data *data = dev->data;
3243         int ret;
3244
3245         /* Apply vlan offload setting */
3246         i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3247
3248         /* Apply double-vlan setting, not implemented yet */
3249
3250         /* Apply pvid setting */
3251         ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3252                                 data->dev_conf.txmode.hw_vlan_insert_pvid);
3253         if (ret)
3254                 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3255
3256         return ret;
3257 }
3258
3259 static int
3260 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3261 {
3262         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3263
3264         return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3265 }
3266
3267 static int
3268 i40e_update_flow_control(struct i40e_hw *hw)
3269 {
3270 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3271         struct i40e_link_status link_status;
3272         uint32_t rxfc = 0, txfc = 0, reg;
3273         uint8_t an_info;
3274         int ret;
3275
3276         memset(&link_status, 0, sizeof(link_status));
3277         ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3278         if (ret != I40E_SUCCESS) {
3279                 PMD_DRV_LOG(ERR, "Failed to get link status information");
3280                 goto write_reg; /* Disable flow control */
3281         }
3282
3283         an_info = hw->phy.link_info.an_info;
3284         if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3285                 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3286                 ret = I40E_ERR_NOT_READY;
3287                 goto write_reg; /* Disable flow control */
3288         }
3289         /**
3290          * If link auto negotiation is enabled, flow control needs to
3291          * be configured according to it
3292          */
3293         switch (an_info & I40E_LINK_PAUSE_RXTX) {
3294         case I40E_LINK_PAUSE_RXTX:
3295                 rxfc = 1;
3296                 txfc = 1;
3297                 hw->fc.current_mode = I40E_FC_FULL;
3298                 break;
3299         case I40E_AQ_LINK_PAUSE_RX:
3300                 rxfc = 1;
3301                 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3302                 break;
3303         case I40E_AQ_LINK_PAUSE_TX:
3304                 txfc = 1;
3305                 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3306                 break;
3307         default:
3308                 hw->fc.current_mode = I40E_FC_NONE;
3309                 break;
3310         }
3311
3312 write_reg:
3313         I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3314                 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3315         reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3316         reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3317         reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3318         I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3319
3320         return ret;
3321 }
3322
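/*
 * Annotation: summary of the negotiated-pause decoding above.
 *
 *	an_info pause bits    rxfc  txfc  fc.current_mode
 *	RX | TX               1     1     I40E_FC_FULL
 *	RX only               1     0     I40E_FC_RX_PAUSE
 *	TX only               0     1     I40E_FC_TX_PAUSE
 *	neither               0     0     I40E_FC_NONE
 *
 * Every failure path falls through to write_reg with rxfc = txfc = 0,
 * i.e. flow control is disabled when the link state cannot be trusted.
 */
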
3323 /* PF setup */
3324 static int
3325 i40e_pf_setup(struct i40e_pf *pf)
3326 {
3327         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3328         struct i40e_filter_control_settings settings;
3329         struct i40e_vsi *vsi;
3330         int ret;
3331
3332         /* Clear all stats counters */
3333         pf->offset_loaded = FALSE;
3334         memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3335         memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3336
3337         ret = i40e_pf_get_switch_config(pf);
3338         if (ret != I40E_SUCCESS) {
3339                 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3340                 return ret;
3341         }
3342         if (pf->flags & I40E_FLAG_FDIR) {
3343                 /* Allocate queues first so that FDIR can use queue pair 0 */
3344                 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
3345                 if (ret != I40E_FDIR_QUEUE_ID) {
3346                         PMD_DRV_LOG(ERR, "queue allocation for FDIR failed:"
3347                                     " ret = %d", ret);
3348                         pf->flags &= ~I40E_FLAG_FDIR;
3349                 }
3350         }
3351         /* Main VSI setup */
3352         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3353         if (!vsi) {
3354                 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3355                 return I40E_ERR_NOT_READY;
3356         }
3357         pf->main_vsi = vsi;
3358
3359         /* Configure filter control */
3360         memset(&settings, 0, sizeof(settings));
3361         if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3362                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3363         else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3364                 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3365         else {
3366                 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
3367                                                 hw->func_caps.rss_table_size);
3368                 return I40E_ERR_PARAM;
3369         }
3370         PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3371                         "size: %u", hw->func_caps.rss_table_size);
3372         pf->hash_lut_size = hw->func_caps.rss_table_size;
3373
3374         /* Enable ethtype and macvlan filters */
3375         settings.enable_ethtype = TRUE;
3376         settings.enable_macvlan = TRUE;
3377         ret = i40e_set_filter_control(hw, &settings);
3378         if (ret)
3379                 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3380                                                                 ret);
3381
3382         /* Update flow control according to the auto negotiation */
3383         i40e_update_flow_control(hw);
3384
3385         return I40E_SUCCESS;
3386 }
3387
3388 int
3389 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3390 {
3391         uint32_t reg;
3392         uint16_t j;
3393
3394         /**
3395          * Set or clear the TX Queue Disable flags,
3396          * as required by the hardware.
3397          */
3398         i40e_pre_tx_queue_cfg(hw, q_idx, on);
3399         rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3400
3401         /* Wait until the request is finished */
3402         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3403                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3404                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3405                 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3406                         ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3407                                                         & 0x1))) {
3408                         break;
3409                 }
3410         }
3411         if (on) {
3412                 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3413                         return I40E_SUCCESS; /* already on, skip next steps */
3414
3415                 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3416                 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3417         } else {
3418                 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3419                         return I40E_SUCCESS; /* already off, skip next steps */
3420                 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3421         }
3422         /* Write the register */
3423         I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3424         /* Check the result */
3425         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3426                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3427                 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3428                 if (on) {
3429                         if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3430                                 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3431                                 break;
3432                 } else {
3433                         if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3434                                 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3435                                 break;
3436                 }
3437         }
3438         /* Check if it is timeout */
3439         if (j >= I40E_CHK_Q_ENA_COUNT) {
3440                 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3441                             (on ? "enable" : "disable"), q_idx);
3442                 return I40E_ERR_TIMEOUT;
3443         }
3444
3445         return I40E_SUCCESS;
3446 }
3447
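/*
 * Annotation: the handshake above relies on the hardware bringing
 * QENA_STAT in line with QENA_REQ once a requested transition completes,
 * so both polling loops simply wait until the two bits agree. A minimal
 * caller-side sketch, assuming queue index 0:
 *
 *	if (i40e_switch_tx_queue(hw, 0, TRUE) != I40E_SUCCESS)
 *		PMD_DRV_LOG(ERR, "Failed to enable tx queue 0");
 */
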
3448 /* Switch on or off the tx queues */
3449 static int
3450 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3451 {
3452         struct rte_eth_dev_data *dev_data = pf->dev_data;
3453         struct i40e_tx_queue *txq;
3454         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3455         uint16_t i;
3456         int ret;
3457
3458         for (i = 0; i < dev_data->nb_tx_queues; i++) {
3459                 txq = dev_data->tx_queues[i];
3460                 /* Skip the queue if it is not configured, or if it is
3461                  * marked deferred-start when starting all queues */
3462                 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3463                         continue;
3464                 if (on)
3465                         ret = i40e_dev_tx_queue_start(dev, i);
3466                 else
3467                         ret = i40e_dev_tx_queue_stop(dev, i);
3468                 if (ret != I40E_SUCCESS)
3469                         return ret;
3470         }
3471
3472         return I40E_SUCCESS;
3473 }
3474
3475 int
3476 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3477 {
3478         uint32_t reg;
3479         uint16_t j;
3480
3481         /* Wait until the request is finished */
3482         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3483                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3484                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3485                 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3486                         ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
3487                         break;
3488         }
3489
3490         if (on) {
3491                 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3492                         return I40E_SUCCESS; /* Already on, skip next steps */
3493                 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3494         } else {
3495                 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3496                         return I40E_SUCCESS; /* Already off, skip next steps */
3497                 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3498         }
3499
3500         /* Write the register */
3501         I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3502         /* Check the result */
3503         for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3504                 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3505                 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3506                 if (on) {
3507                         if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3508                                 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3509                                 break;
3510                 } else {
3511                         if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3512                                 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3513                                 break;
3514                 }
3515         }
3516
3517         /* Check if it is timeout */
3518         if (j >= I40E_CHK_Q_ENA_COUNT) {
3519                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3520                             (on ? "enable" : "disable"), q_idx);
3521                 return I40E_ERR_TIMEOUT;
3522         }
3523
3524         return I40E_SUCCESS;
3525 }
3526 /* Switch on or off the rx queues */
3527 static int
3528 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3529 {
3530         struct rte_eth_dev_data *dev_data = pf->dev_data;
3531         struct i40e_rx_queue *rxq;
3532         struct rte_eth_dev *dev = pf->adapter->eth_dev;
3533         uint16_t i;
3534         int ret;
3535
3536         for (i = 0; i < dev_data->nb_rx_queues; i++) {
3537                 rxq = dev_data->rx_queues[i];
3538                 /* Skip the queue if it is not configured, or if it is
3539                  * marked deferred-start when starting all queues */
3540                 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3541                         continue;
3542                 if (on)
3543                         ret = i40e_dev_rx_queue_start(dev, i);
3544                 else
3545                         ret = i40e_dev_rx_queue_stop(dev, i);
3546                 if (ret != I40E_SUCCESS)
3547                         return ret;
3548         }
3549
3550         return I40E_SUCCESS;
3551 }
3552
3553 /* Switch on or off all the rx/tx queues */
3554 int
3555 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3556 {
3557         int ret;
3558
3559         if (on) {
3560                 /* enable rx queues before enabling tx queues */
3561                 ret = i40e_dev_switch_rx_queues(pf, on);
3562                 if (ret) {
3563                         PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3564                         return ret;
3565                 }
3566                 ret = i40e_dev_switch_tx_queues(pf, on);
3567         } else {
3568                 /* Stop tx queues before stopping rx queues */
3569                 ret = i40e_dev_switch_tx_queues(pf, on);
3570                 if (ret) {
3571                         PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3572                         return ret;
3573                 }
3574                 ret = i40e_dev_switch_rx_queues(pf, on);
3575         }
3576
3577         return ret;
3578 }
3579
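/*
 * Annotation: the ordering above (rx before tx when enabling, tx before rx
 * when disabling) avoids transmitting against a receive path that is not
 * ready, and vice versa. A sketch of the expected call sites, assumed to
 * be the device start/stop paths:
 *
 *	ret = i40e_dev_switch_queues(pf, TRUE);   // on device start
 *	...
 *	i40e_dev_switch_queues(pf, FALSE);        // on device stop
 */
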
3580 /* Initialize VSI for TX */
3581 static int
3582 i40e_dev_tx_init(struct i40e_pf *pf)
3583 {
3584         struct rte_eth_dev_data *data = pf->dev_data;
3585         uint16_t i;
3586         uint32_t ret = I40E_SUCCESS;
3587         struct i40e_tx_queue *txq;
3588
3589         for (i = 0; i < data->nb_tx_queues; i++) {
3590                 txq = data->tx_queues[i];
3591                 if (!txq || !txq->q_set)
3592                         continue;
3593                 ret = i40e_tx_queue_init(txq);
3594                 if (ret != I40E_SUCCESS)
3595                         break;
3596         }
3597
3598         return ret;
3599 }
3600
3601 /* Initialize VSI for RX */
3602 static int
3603 i40e_dev_rx_init(struct i40e_pf *pf)
3604 {
3605         struct rte_eth_dev_data *data = pf->dev_data;
3606         int ret = I40E_SUCCESS;
3607         uint16_t i;
3608         struct i40e_rx_queue *rxq;
3609
3610         i40e_pf_config_mq_rx(pf);
3611         for (i = 0; i < data->nb_rx_queues; i++) {
3612                 rxq = data->rx_queues[i];
3613                 if (!rxq || !rxq->q_set)
3614                         continue;
3615
3616                 ret = i40e_rx_queue_init(rxq);
3617                 if (ret != I40E_SUCCESS) {
3618                         PMD_DRV_LOG(ERR, "Failed to do RX queue "
3619                                     "initialization");
3620                         break;
3621                 }
3622         }
3623
3624         return ret;
3625 }
3626
3627 static int
3628 i40e_dev_rxtx_init(struct i40e_pf *pf)
3629 {
3630         int err;
3631
3632         err = i40e_dev_tx_init(pf);
3633         if (err) {
3634                 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3635                 return err;
3636         }
3637         err = i40e_dev_rx_init(pf);
3638         if (err) {
3639                 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3640                 return err;
3641         }
3642
3643         return err;
3644 }
3645
3646 static int
3647 i40e_vmdq_setup(struct rte_eth_dev *dev)
3648 {
3649         struct rte_eth_conf *conf = &dev->data->dev_conf;
3650         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3651         int i, err, conf_vsis, j, loop;
3652         struct i40e_vsi *vsi;
3653         struct i40e_vmdq_info *vmdq_info;
3654         struct rte_eth_vmdq_rx_conf *vmdq_conf;
3655         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3656
3657         /*
3658          * Disable interrupts to avoid messages from VFs. This also
3659          * avoids race conditions during VSI creation/destruction.
3660          */
3661         i40e_pf_disable_irq0(hw);
3662
3663         if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3664                 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3665                 return -ENOTSUP;
3666         }
3667
3668         conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3669         if (conf_vsis > pf->max_nb_vmdq_vsi) {
3670                 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3671                         conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3672                         pf->max_nb_vmdq_vsi);
3673                 return -ENOTSUP;
3674         }
3675
3676         if (pf->vmdq != NULL) {
3677                 PMD_INIT_LOG(INFO, "VMDQ already configured");
3678                 return 0;
3679         }
3680
3681         pf->vmdq = rte_zmalloc("vmdq_info_struct",
3682                                 sizeof(*vmdq_info) * conf_vsis, 0);
3683
3684         if (pf->vmdq == NULL) {
3685                 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3686                 return -ENOMEM;
3687         }
3688
3689         vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3690
3691         /* Create VMDQ VSI */
3692         for (i = 0; i < conf_vsis; i++) {
3693                 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3694                                 vmdq_conf->enable_loop_back);
3695                 if (vsi == NULL) {
3696                         PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3697                         err = -1;
3698                         goto err_vsi_setup;
3699                 }
3700                 vmdq_info = &pf->vmdq[i];
3701                 vmdq_info->pf = pf;
3702                 vmdq_info->vsi = vsi;
3703         }
3704         pf->nb_cfg_vmdq_vsi = conf_vsis;
3705
3706         /* Configure VLANs */
3707         loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3708         for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3709                 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3710                         if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3711                                 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3712                                         vmdq_conf->pool_map[i].vlan_id, j);
3713
3714                                 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3715                                                 vmdq_conf->pool_map[i].vlan_id);
3716                                 if (err) {
3717                                         PMD_INIT_LOG(ERR, "Failed to add vlan");
3718                                         err = -1;
3719                                         goto err_vsi_setup;
3720                                 }
3721                         }
3722                 }
3723         }
3724
3725         i40e_pf_enable_irq0(hw);
3726
3727         return 0;
3728
3729 err_vsi_setup:
3730         for (i = 0; i < conf_vsis; i++) {
3731                 if (pf->vmdq[i].vsi == NULL)
3732                         break;
3733                 i40e_vsi_release(pf->vmdq[i].vsi);
3734         }
3735
3736         rte_free(pf->vmdq);
3737         pf->vmdq = NULL;
3738         i40e_pf_enable_irq0(hw);
3739         return err;
3740 }
3741
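/*
 * Illustrative application-side sketch (an assumption, not shown in this
 * file): the configuration consumed by i40e_vmdq_setup() comes from
 * rte_eth_conf.rx_adv_conf.vmdq_rx_conf, e.g.:
 *
 *	struct rte_eth_conf conf;
 *	struct rte_eth_vmdq_rx_conf *vmdq = &conf.rx_adv_conf.vmdq_rx_conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	vmdq->nb_queue_pools = ETH_8_POOLS;  // one VMDQ VSI per pool
 *	vmdq->enable_loop_back = 1;          // passed as user_param above
 *	vmdq->nb_pool_maps = 1;
 *	vmdq->pool_map[0].vlan_id = 100;     // vlan added to mapped pools
 *	vmdq->pool_map[0].pools = 0x3;       // bitmap: pools 0 and 1
 */
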
3742 static void
3743 i40e_stat_update_32(struct i40e_hw *hw,
3744                    uint32_t reg,
3745                    bool offset_loaded,
3746                    uint64_t *offset,
3747                    uint64_t *stat)
3748 {
3749         uint64_t new_data;
3750
3751         new_data = (uint64_t)I40E_READ_REG(hw, reg);
3752         if (!offset_loaded)
3753                 *offset = new_data;
3754
3755         if (new_data >= *offset)
3756                 *stat = (uint64_t)(new_data - *offset);
3757         else
3758                 *stat = (uint64_t)((new_data +
3759                         ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
3760 }
3761
3762 static void
3763 i40e_stat_update_48(struct i40e_hw *hw,
3764                    uint32_t hireg,
3765                    uint32_t loreg,
3766                    bool offset_loaded,
3767                    uint64_t *offset,
3768                    uint64_t *stat)
3769 {
3770         uint64_t new_data;
3771
3772         new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3773         new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3774                         I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
3775
3776         if (!offset_loaded)
3777                 *offset = new_data;
3778
3779         if (new_data >= *offset)
3780                 *stat = new_data - *offset;
3781         else
3782                 *stat = (uint64_t)((new_data +
3783                         ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
3784
3785         *stat &= I40E_48_BIT_MASK;
3786 }
3787
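/*
 * Worked example (annotation only) of the wrap-around handling shared by
 * i40e_stat_update_32() and i40e_stat_update_48(): with a 48-bit counter,
 * if *offset = 0xFFFFFFFFFF00 and the register now reads
 * new_data = 0x100, then new_data < *offset, so
 * *stat = new_data + 2^48 - *offset = 0x200, the true delta across the
 * wrap.
 */
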
3788 /* Disable IRQ0 */
3789 void
3790 i40e_pf_disable_irq0(struct i40e_hw *hw)
3791 {
3792         /* Disable all interrupt types */
3793         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3794         I40E_WRITE_FLUSH(hw);
3795 }
3796
3797 /* Enable IRQ0 */
3798 void
3799 i40e_pf_enable_irq0(struct i40e_hw *hw)
3800 {
3801         I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3802                 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3803                 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3804                 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3805         I40E_WRITE_FLUSH(hw);
3806 }
3807
3808 static void
3809 i40e_pf_config_irq0(struct i40e_hw *hw)
3810 {
3811         /* read pending request and disable first */
3812         i40e_pf_disable_irq0(hw);
3813         I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3814         I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3815                 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3816
3817         /* Link no queues with irq0 */
3818         I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3819                 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3820 }
3821
3822 static void
3823 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3824 {
3825         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3826         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3827         int i;
3828         uint16_t abs_vf_id;
3829         uint32_t index, offset, val;
3830
3831         if (!pf->vfs)
3832                 return;
3833         /**
3834          * Try to find which VF triggered a reset; use the absolute VF id
3835          * since the register is a global register.
3836          */
3837         for (i = 0; i < pf->vf_num; i++) {
3838                 abs_vf_id = hw->func_caps.vf_base_id + i;
3839                 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3840                 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3841                 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3842                 /* VFR event occurred */
3843                 if (val & (0x1 << offset)) {
3844                         int ret;
3845
3846                         /* Clear the event first */
3847                         I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3848                                                         (0x1 << offset));
3849                         PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
3850                         /**
3851                          * Only notify that a VF reset event occurred;
3852                          * don't trigger another SW reset.
3853                          */
3854                         ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3855                         if (ret != I40E_SUCCESS)
3856                                 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3857                 }
3858         }
3859 }
3860
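/*
 * Worked example (annotation only) of the VFLRSTAT indexing above, taking
 * I40E_UINT32_BIT_SIZE to be 32: the status is one bit per absolute VF id
 * spread across 32-bit registers, so for abs_vf_id = 45, index = 45 / 32
 * = 1 and offset = 45 % 32 = 13; the event is tested and cleared with
 * (0x1 << 13) in I40E_GLGEN_VFLRSTAT(1).
 */
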
3861 static void
3862 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3863 {
3864         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3865         struct i40e_arq_event_info info;
3866         uint16_t pending, opcode;
3867         int ret;
3868
3869         info.buf_len = I40E_AQ_BUF_SZ;
3870         info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3871         if (!info.msg_buf) {
3872                 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3873                 return;
3874         }
3875
3876         pending = 1;
3877         while (pending) {
3878                 ret = i40e_clean_arq_element(hw, &info, &pending);
3879
3880                 if (ret != I40E_SUCCESS) {
3881                         PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3882                                     "aq_err: %u", hw->aq.asq_last_status);
3883                         break;
3884                 }
3885                 opcode = rte_le_to_cpu_16(info.desc.opcode);
3886
3887                 switch (opcode) {
3888                 case i40e_aqc_opc_send_msg_to_pf:
3889                         /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3890                         i40e_pf_host_handle_vf_msg(dev,
3891                                         rte_le_to_cpu_16(info.desc.retval),
3892                                         rte_le_to_cpu_32(info.desc.cookie_high),
3893                                         rte_le_to_cpu_32(info.desc.cookie_low),
3894                                         info.msg_buf,
3895                                         info.msg_len);
3896                         break;
3897                 default:
3898                         PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3899                                     opcode);
3900                         break;
3901                 }
3902         }
3903         rte_free(info.msg_buf);
3904 }
3905
3906 /*
3907  * This interrupt handler is registered as an alarm callback to handle the
3908  * LSC interrupt after a fixed delay, in order to let the NIC settle into a
3909  * stable state. Currently i40e waits 1 sec for the link up interrupt; no
3910  * wait is needed for the link down interrupt.
3911  */
3912 static void
3913 i40e_dev_interrupt_delayed_handler(void *param)
3914 {
3915         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3916         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3917         uint32_t icr0;
3918
3919         /* read interrupt causes again */
3920         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3921
3922 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3923         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3924                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3925         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3926                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3927         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3928                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3929         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3930                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3931         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3932                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3933                                                                 "state");
3934         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3935                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
3936         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3937                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
3938 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3939
3940         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3941                 PMD_DRV_LOG(INFO, "INT:VF reset detected");
3942                 i40e_dev_handle_vfr_event(dev);
3943         }
3944         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3945                 PMD_DRV_LOG(INFO, "INT:ADMINQ event");
3946                 i40e_dev_handle_aq_msg(dev);
3947         }
3948
3949         /* handle the link up interrupt in an alarm callback */
3950         i40e_dev_link_update(dev, 0);
3951         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3952
3953         i40e_pf_enable_irq0(hw);
3954         rte_intr_enable(&(dev->pci_dev->intr_handle));
3955 }
3956
3957 /**
3958  * Interrupt handler triggered by the NIC for handling
3959  * specific interrupts.
3960  *
3961  * @param handle
3962  *  Pointer to interrupt handle.
3963  * @param param
3964  *  The address of parameter (struct rte_eth_dev *) registered before.
3965  *
3966  * @return
3967  *  void
3968  */
3969 static void
3970 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3971                            void *param)
3972 {
3973         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3974         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3975         uint32_t icr0;
3976
3977         /* Disable interrupt */
3978         i40e_pf_disable_irq0(hw);
3979
3980         /* read out interrupt causes */
3981         icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3982
3983         /* No interrupt event indicated */
3984         if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3985                 PMD_DRV_LOG(INFO, "No interrupt event");
3986                 goto done;
3987         }
3988 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3989         if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3990                 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3991         if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3992                 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3993         if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3994                 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3995         if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3996                 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3997         if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3998                 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
3999         if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4000                 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4001         if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4002                 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4003 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4004
4005         if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4006                 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4007                 i40e_dev_handle_vfr_event(dev);
4008         }
4009         if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4010                 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4011                 i40e_dev_handle_aq_msg(dev);
4012         }
4013
4014         /* Link Status Change interrupt */
4015         if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4016 #define I40E_US_PER_SECOND 1000000
4017                 struct rte_eth_link link;
4018
4019                 PMD_DRV_LOG(INFO, "ICR0: link status changed");
4020                 memset(&link, 0, sizeof(link));
4021                 rte_i40e_dev_atomic_read_link_status(dev, &link);
4022                 i40e_dev_link_update(dev, 0);
4023
4024                 /*
4025                  * A link-up interrupt needs a 1 second wait to let the
4026                  * hardware reach a stable state; otherwise several
4027                  * consecutive interrupts can be observed.
4028                  * A link-down interrupt needs no wait.
4029                  */
4030                 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4031                         i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4032                         return;
4033                 else
4034                         _rte_eth_dev_callback_process(dev,
4035                                 RTE_ETH_EVENT_INTR_LSC);
4036         }
4037
4038 done:
4039         /* Enable interrupt */
4040         i40e_pf_enable_irq0(hw);
4041         rte_intr_enable(&(dev->pci_dev->intr_handle));
4042 }
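
/*
 * Control-flow sketch of the LSC handling above: when the previously
 * recorded status was link-down (a presumed link-up event), the handler
 * returns early with IRQ0 still masked, and re-arming is left to
 * i40e_dev_interrupt_delayed_handler(), scheduled one second later via
 * rte_eal_alarm_set(); otherwise the user callback runs immediately and
 * the "done" path re-enables the interrupt in place.
 */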
4043
4044 static int
4045 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4046                          struct i40e_macvlan_filter *filter,
4047                          int total)
4048 {
4049         int ele_num, ele_buff_size;
4050         int num, actual_num, i;
4051         uint16_t flags;
4052         int ret = I40E_SUCCESS;
4053         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4054         struct i40e_aqc_add_macvlan_element_data *req_list;
4055
4056         if (filter == NULL || total == 0)
4057                 return I40E_ERR_PARAM;
4058         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4059         ele_buff_size = hw->aq.asq_buf_size;
4060
4061         req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
4062         if (req_list == NULL) {
4063                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4064                 return I40E_ERR_NO_MEMORY;
4065         }
4066
4067         num = 0;
4068         do {
4069                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4070                 memset(req_list, 0, ele_buff_size);
4071
4072                 for (i = 0; i < actual_num; i++) {
4073                         (void)rte_memcpy(req_list[i].mac_addr,
4074                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4075                         req_list[i].vlan_tag =
4076                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4077
4078                         switch (filter[num + i].filter_type) {
4079                         case RTE_MAC_PERFECT_MATCH:
4080                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
4081                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4082                                 break;
4083                         case RTE_MACVLAN_PERFECT_MATCH:
4084                                 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
4085                                 break;
4086                         case RTE_MAC_HASH_MATCH:
4087                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
4088                                         I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4089                                 break;
4090                         case RTE_MACVLAN_HASH_MATCH:
4091                                 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
4092                                 break;
4093                         default:
4094                                 PMD_DRV_LOG(ERR, "Invalid MAC match type");
4095                                 ret = I40E_ERR_PARAM;
4096                                 goto DONE;
4097                         }
4098
4099                         req_list[i].queue_number = 0;
4100
4101                         req_list[i].flags = rte_cpu_to_le_16(flags);
4102                 }
4103
4104                 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
4105                                                 actual_num, NULL);
4106                 if (ret != I40E_SUCCESS) {
4107                         PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
4108                         goto DONE;
4109                 }
4110                 num += actual_num;
4111         } while (num < total);
4112
4113 DONE:
4114         rte_free(req_list);
4115         return ret;
4116 }
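
/*
 * Arithmetic sketch of the batching above, with assumed sizes (the real
 * values come from hw->aq.asq_buf_size and the element layout): if
 * asq_buf_size were 4096 bytes and each add-macvlan element 16 bytes,
 * ele_num = 4096 / 16 = 256, so adding total = 600 filters would issue
 * three AQ commands carrying 256, 256 and 88 elements respectively.
 */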
4117
4118 static int
4119 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
4120                             struct i40e_macvlan_filter *filter,
4121                             int total)
4122 {
4123         int ele_num, ele_buff_size;
4124         int num, actual_num, i;
4125         uint16_t flags;
4126         int ret = I40E_SUCCESS;
4127         struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4128         struct i40e_aqc_remove_macvlan_element_data *req_list;
4129
4130         if (filter == NULL || total == 0)
4131                 return I40E_ERR_PARAM;
4132
4133         ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4134         ele_buff_size = hw->aq.asq_buf_size;
4135
4136         req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
4137         if (req_list == NULL) {
4138                 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4139                 return I40E_ERR_NO_MEMORY;
4140         }
4141
4142         num = 0;
4143         do {
4144                 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4145                 memset(req_list, 0, ele_buff_size);
4146
4147                 for (i = 0; i < actual_num; i++) {
4148                         (void)rte_memcpy(req_list[i].mac_addr,
4149                                 &filter[num + i].macaddr, ETH_ADDR_LEN);
4150                         req_list[i].vlan_tag =
4151                                 rte_cpu_to_le_16(filter[num + i].vlan_id);
4152
4153                         switch (filter[num + i].filter_type) {
4154                         case RTE_MAC_PERFECT_MATCH:
4155                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4156                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4157                                 break;
4158                         case RTE_MACVLAN_PERFECT_MATCH:
4159                                 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4160                                 break;
4161                         case RTE_MAC_HASH_MATCH:
4162                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
4163                                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4164                                 break;
4165                         case RTE_MACVLAN_HASH_MATCH:
4166                                 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
4167                                 break;
4168                         default:
4169                                 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
4170                                 ret = I40E_ERR_PARAM;
4171                                 goto DONE;
4172                         }
4173                         req_list[i].flags = rte_cpu_to_le_16(flags);
4174                 }
4175
4176                 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
4177                                                 actual_num, NULL);
4178                 if (ret != I40E_SUCCESS) {
4179                         PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
4180                         goto DONE;
4181                 }
4182                 num += actual_num;
4183         } while (num < total);
4184
4185 DONE:
4186         rte_free(req_list);
4187         return ret;
4188 }
4189
4190 /* Find out specific MAC filter */
4191 static struct i40e_mac_filter *
4192 i40e_find_mac_filter(struct i40e_vsi *vsi,
4193                          struct ether_addr *macaddr)
4194 {
4195         struct i40e_mac_filter *f;
4196
4197         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4198                 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4199                         return f;
4200         }
4201
4202         return NULL;
4203 }
4204
4205 static bool
4206 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4207                          uint16_t vlan_id)
4208 {
4209         uint32_t vid_idx, vid_bit;
4210
4211         if (vlan_id > ETH_VLAN_ID_MAX)
4212                 return 0;
4213
4214         vid_idx = I40E_VFTA_IDX(vlan_id);
4215         vid_bit = I40E_VFTA_BIT(vlan_id);
4216
4217         if (vsi->vfta[vid_idx] & vid_bit)
4218                 return 1;
4219         else
4220                 return 0;
4221 }
4222
4223 static void
4224 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4225                          uint16_t vlan_id, bool on)
4226 {
4227         uint32_t vid_idx, vid_bit;
4228
4229         if (vlan_id > ETH_VLAN_ID_MAX)
4230                 return;
4231
4232         vid_idx = I40E_VFTA_IDX(vlan_id);
4233         vid_bit = I40E_VFTA_BIT(vlan_id);
4234
4235         if (on)
4236                 vsi->vfta[vid_idx] |= vid_bit;
4237         else
4238                 vsi->vfta[vid_idx] &= ~vid_bit;
4239 }
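
/*
 * Worked example of the VFTA bookkeeping above, assuming the usual
 * definitions I40E_VFTA_IDX(v) == v / I40E_UINT32_BIT_SIZE and
 * I40E_VFTA_BIT(v) == 1 << (v % I40E_UINT32_BIT_SIZE):
 *
 *   vlan_id = 100  ->  vid_idx = 100 / 32 = 3, vid_bit = 1 << 4
 *
 * so enabling vlan 100 sets bit 4 of vsi->vfta[3], and
 * i40e_find_all_vlan_for_mac() below recovers the id as
 * j * I40E_UINT32_BIT_SIZE + k = 3 * 32 + 4 = 100.
 */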
4240
4241 /**
4242  * Find all vlan options for specific mac addr,
4243  * return with actual vlan found.
4244  */
4245 static inline int
4246 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4247                            struct i40e_macvlan_filter *mv_f,
4248                            int num, struct ether_addr *addr)
4249 {
4250         int i;
4251         uint32_t j, k;
4252
4253         /**
4254          * i40e_find_vlan_filter() is deliberately not used here, to keep
4255          * the loop count down even though the code looks more complex.
4256          */
4257         if (num < vsi->vlan_num)
4258                 return I40E_ERR_PARAM;
4259
4260         i = 0;
4261         for (j = 0; j < I40E_VFTA_SIZE; j++) {
4262                 if (vsi->vfta[j]) {
4263                         for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4264                                 if (vsi->vfta[j] & (1 << k)) {
4265                                         if (i > num - 1) {
4266                                                 PMD_DRV_LOG(ERR, "vlan number "
4267                                                             "does not match");
4268                                                 return I40E_ERR_PARAM;
4269                                         }
4270                                         (void)rte_memcpy(&mv_f[i].macaddr,
4271                                                         addr, ETH_ADDR_LEN);
4272                                         mv_f[i].vlan_id =
4273                                                 j * I40E_UINT32_BIT_SIZE + k;
4274                                         i++;
4275                                 }
4276                         }
4277                 }
4278         }
4279         return I40E_SUCCESS;
4280 }
4281
4282 static inline int
4283 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4284                            struct i40e_macvlan_filter *mv_f,
4285                            int num,
4286                            uint16_t vlan)
4287 {
4288         int i = 0;
4289         struct i40e_mac_filter *f;
4290
4291         if (num < vsi->mac_num)
4292                 return I40E_ERR_PARAM;
4293
4294         TAILQ_FOREACH(f, &vsi->mac_list, next) {
4295                 if (i > num - 1) {
4296                         PMD_DRV_LOG(ERR, "buffer number does not match");
4297                         return I40E_ERR_PARAM;
4298                 }
4299                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4300                                 ETH_ADDR_LEN);
4301                 mv_f[i].vlan_id = vlan;
4302                 mv_f[i].filter_type = f->mac_info.filter_type;
4303                 i++;
4304         }
4305
4306         return I40E_SUCCESS;
4307 }
4308
4309 static int
4310 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
4311 {
4312         int i, num;
4313         struct i40e_mac_filter *f;
4314         struct i40e_macvlan_filter *mv_f;
4315         int ret = I40E_SUCCESS;
4316
4317         if (vsi == NULL || vsi->mac_num == 0)
4318                 return I40E_ERR_PARAM;
4319
4320         /* Case that no vlan is set */
4321         if (vsi->vlan_num == 0)
4322                 num = vsi->mac_num;
4323         else
4324                 num = vsi->mac_num * vsi->vlan_num;
4325
4326         mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
4327         if (mv_f == NULL) {
4328                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4329                 return I40E_ERR_NO_MEMORY;
4330         }
4331
4332         i = 0;
4333         if (vsi->vlan_num == 0) {
4334                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4335                         (void)rte_memcpy(&mv_f[i].macaddr,
4336                                 &f->mac_info.mac_addr, ETH_ADDR_LEN);
4337                         mv_f[i].vlan_id = 0;
4338                         i++;
4339                 }
4340         } else {
4341                 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4342                         ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
4343                                         vsi->vlan_num, &f->mac_info.mac_addr);
4344                         if (ret != I40E_SUCCESS)
4345                                 goto DONE;
4346                         i += vsi->vlan_num;
4347                 }
4348         }
4349
4350         ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
4351 DONE:
4352         rte_free(mv_f);
4353
4354         return ret;
4355 }
4356
4357 int
4358 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4359 {
4360         struct i40e_macvlan_filter *mv_f;
4361         int mac_num;
4362         int ret = I40E_SUCCESS;
4363
4364         if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4365                 return I40E_ERR_PARAM;
4366
4367         /* If it's already set, just return */
4368         if (i40e_find_vlan_filter(vsi, vlan))
4369                 return I40E_SUCCESS;
4370
4371         mac_num = vsi->mac_num;
4372
4373         if (mac_num == 0) {
4374                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4375                 return I40E_ERR_PARAM;
4376         }
4377
4378         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4379
4380         if (mv_f == NULL) {
4381                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4382                 return I40E_ERR_NO_MEMORY;
4383         }
4384
4385         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4386
4387         if (ret != I40E_SUCCESS)
4388                 goto DONE;
4389
4390         ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4391
4392         if (ret != I40E_SUCCESS)
4393                 goto DONE;
4394
4395         i40e_set_vlan_filter(vsi, vlan, 1);
4396
4397         vsi->vlan_num++;
4398         ret = I40E_SUCCESS;
4399 DONE:
4400         rte_free(mv_f);
4401         return ret;
4402 }
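
/*
 * Note that the VSI maintains the full MAC x VLAN cross-product in
 * hardware: i40e_vsi_add_vlan() above installs mac_num new filters (one
 * per configured MAC), while i40e_vsi_add_mac() below installs vlan_num
 * filters per new MAC, giving vsi->mac_num * vsi->vlan_num entries in
 * total.
 */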
4403
4404 int
4405 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4406 {
4407         struct i40e_macvlan_filter *mv_f;
4408         int mac_num;
4409         int ret = I40E_SUCCESS;
4410
4411         /**
4412          * Vlan 0 is the generic filter for untagged packets
4413          * and can't be removed.
4414          */
4415         if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
4416                 return I40E_ERR_PARAM;
4417
4418         /* If it can't be found, just return */
4419         if (!i40e_find_vlan_filter(vsi, vlan))
4420                 return I40E_ERR_PARAM;
4421
4422         mac_num = vsi->mac_num;
4423
4424         if (mac_num == 0) {
4425                 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4426                 return I40E_ERR_PARAM;
4427         }
4428
4429         mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4430
4431         if (mv_f == NULL) {
4432                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4433                 return I40E_ERR_NO_MEMORY;
4434         }
4435
4436         ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4437
4438         if (ret != I40E_SUCCESS)
4439                 goto DONE;
4440
4441         ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4442
4443         if (ret != I40E_SUCCESS)
4444                 goto DONE;
4445
4446         /* This is the last vlan to remove; replace all mac filters with vlan 0 */
4447         if (vsi->vlan_num == 1) {
4448                 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4449                 if (ret != I40E_SUCCESS)
4450                         goto DONE;
4451
4452                 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4453                 if (ret != I40E_SUCCESS)
4454                         goto DONE;
4455         }
4456
4457         i40e_set_vlan_filter(vsi, vlan, 0);
4458
4459         vsi->vlan_num--;
4460         ret = I40E_SUCCESS;
4461 DONE:
4462         rte_free(mv_f);
4463         return ret;
4464 }
4465
4466 int
4467 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4468 {
4469         struct i40e_mac_filter *f;
4470         struct i40e_macvlan_filter *mv_f;
4471         int i, vlan_num = 0;
4472         int ret = I40E_SUCCESS;
4473
4474         /* If the MAC filter to add is already configured, just return */
4475         f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4476         if (f != NULL)
4477                 return I40E_SUCCESS;
4478         if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4479                 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4480
4481                 /**
4482                  * If vlan_num is 0, this is the first MAC being added;
4483                  * set the mask for vlan_id 0.
4484                  */
4485                 if (vsi->vlan_num == 0) {
4486                         i40e_set_vlan_filter(vsi, 0, 1);
4487                         vsi->vlan_num = 1;
4488                 }
4489                 vlan_num = vsi->vlan_num;
4490         } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4491                         (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4492                 vlan_num = 1;
4493
4494         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4495         if (mv_f == NULL) {
4496                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4497                 return I40E_ERR_NO_MEMORY;
4498         }
4499
4500         for (i = 0; i < vlan_num; i++) {
4501                 mv_f[i].filter_type = mac_filter->filter_type;
4502                 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4503                                 ETH_ADDR_LEN);
4504         }
4505
4506         if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4507                 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4508                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4509                                         &mac_filter->mac_addr);
4510                 if (ret != I40E_SUCCESS)
4511                         goto DONE;
4512         }
4513
4514         ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4515         if (ret != I40E_SUCCESS)
4516                 goto DONE;
4517
4518         /* Add the mac addr into mac list */
4519         f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4520         if (f == NULL) {
4521                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4522                 ret = I40E_ERR_NO_MEMORY;
4523                 goto DONE;
4524         }
4525         (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4526                         ETH_ADDR_LEN);
4527         f->mac_info.filter_type = mac_filter->filter_type;
4528         TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4529         vsi->mac_num++;
4530
4531         ret = I40E_SUCCESS;
4532 DONE:
4533         rte_free(mv_f);
4534
4535         return ret;
4536 }
4537
4538 int
4539 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4540 {
4541         struct i40e_mac_filter *f;
4542         struct i40e_macvlan_filter *mv_f;
4543         int i, vlan_num;
4544         enum rte_mac_filter_type filter_type;
4545         int ret = I40E_SUCCESS;
4546
4547         /* Can't find it, return an error */
4548         f = i40e_find_mac_filter(vsi, addr);
4549         if (f == NULL)
4550                 return I40E_ERR_PARAM;
4551
4552         vlan_num = vsi->vlan_num;
4553         filter_type = f->mac_info.filter_type;
4554         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4555                 filter_type == RTE_MACVLAN_HASH_MATCH) {
4556                 if (vlan_num == 0) {
4557                         PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
4558                         return I40E_ERR_PARAM;
4559                 }
4560         } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4561                         filter_type == RTE_MAC_HASH_MATCH)
4562                 vlan_num = 1;
4563
4564         mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4565         if (mv_f == NULL) {
4566                 PMD_DRV_LOG(ERR, "failed to allocate memory");
4567                 return I40E_ERR_NO_MEMORY;
4568         }
4569
4570         for (i = 0; i < vlan_num; i++) {
4571                 mv_f[i].filter_type = filter_type;
4572                 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4573                                 ETH_ADDR_LEN);
4574         }
4575         if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4576                         filter_type == RTE_MACVLAN_HASH_MATCH) {
4577                 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4578                 if (ret != I40E_SUCCESS)
4579                         goto DONE;
4580         }
4581
4582         ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4583         if (ret != I40E_SUCCESS)
4584                 goto DONE;
4585
4586         /* Remove the mac addr from the mac list */
4587         TAILQ_REMOVE(&vsi->mac_list, f, next);
4588         rte_free(f);
4589         vsi->mac_num--;
4590
4591         ret = I40E_SUCCESS;
4592 DONE:
4593         rte_free(mv_f);
4594         return ret;
4595 }
4596
4597 /* Configure hash enable flags for RSS */
4598 uint64_t
4599 i40e_config_hena(uint64_t flags)
4600 {
4601         uint64_t hena = 0;
4602
4603         if (!flags)
4604                 return hena;
4605
4606         if (flags & ETH_RSS_FRAG_IPV4)
4607                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4608         if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
4609                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4610         if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
4611                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4612         if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
4613                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4614         if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
4615                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4616         if (flags & ETH_RSS_FRAG_IPV6)
4617                 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4618         if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
4619                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4620         if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
4621                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4622         if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
4623                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4624         if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
4625                 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4626         if (flags & ETH_RSS_L2_PAYLOAD)
4627                 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4628
4629         return hena;
4630 }
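
/*
 * i40e_parse_hena() below is the inverse of i40e_config_hena(); for
 * example, i40e_config_hena(ETH_RSS_NONFRAG_IPV4_TCP) sets only the
 * I40E_FILTER_PCTYPE_NONF_IPV4_TCP bit, and parsing that bit back
 * yields ETH_RSS_NONFRAG_IPV4_TCP again.
 */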
4631
4632 /* Parse the hash enable flags */
4633 uint64_t
4634 i40e_parse_hena(uint64_t flags)
4635 {
4636         uint64_t rss_hf = 0;
4637
4638         if (!flags)
4639                 return rss_hf;
4640         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4641                 rss_hf |= ETH_RSS_FRAG_IPV4;
4642         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4643                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
4644         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4645                 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
4646         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4647                 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
4648         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4649                 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
4650         if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4651                 rss_hf |= ETH_RSS_FRAG_IPV6;
4652         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4653                 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
4654         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4655                 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
4656         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4657                 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
4658         if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4659                 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
4660         if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4661                 rss_hf |= ETH_RSS_L2_PAYLOAD;
4662
4663         return rss_hf;
4664 }
4665
4666 /* Disable RSS */
4667 static void
4668 i40e_pf_disable_rss(struct i40e_pf *pf)
4669 {
4670         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4671         uint64_t hena;
4672
4673         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4674         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4675         hena &= ~I40E_RSS_HENA_ALL;
4676         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4677         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4678         I40E_WRITE_FLUSH(hw);
4679 }
4680
4681 static int
4682 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4683 {
4684         uint32_t *hash_key;
4685         uint8_t hash_key_len;
4686         uint64_t rss_hf;
4687         uint16_t i;
4688         uint64_t hena;
4689
4690         hash_key = (uint32_t *)(rss_conf->rss_key);
4691         hash_key_len = rss_conf->rss_key_len;
4692         if (hash_key != NULL && hash_key_len >=
4693                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4694                 /* Fill in RSS hash key */
4695                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4696                         I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4697         }
4698
4699         rss_hf = rss_conf->rss_hf;
4700         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4701         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4702         hena &= ~I40E_RSS_HENA_ALL;
4703         hena |= i40e_config_hena(rss_hf);
4704         I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4705         I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4706         I40E_WRITE_FLUSH(hw);
4707
4708         return 0;
4709 }
4710
4711 static int
4712 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4713                          struct rte_eth_rss_conf *rss_conf)
4714 {
4715         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4716         uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4717         uint64_t hena;
4718
4719         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4720         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4721         if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4722                 if (rss_hf != 0) /* Enable RSS */
4723                         return -EINVAL;
4724                 return 0; /* Nothing to do */
4725         }
4726         /* RSS enabled */
4727         if (rss_hf == 0) /* Disable RSS */
4728                 return -EINVAL;
4729
4730         return i40e_hw_rss_hash_set(hw, rss_conf);
4731 }
4732
4733 static int
4734 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4735                            struct rte_eth_rss_conf *rss_conf)
4736 {
4737         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4738         uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4739         uint64_t hena;
4740         uint16_t i;
4741
4742         if (hash_key != NULL) {
4743                 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4744                         hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4745                 rss_conf->rss_key_len = i * sizeof(uint32_t);
4746         }
4747         hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4748         hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4749         rss_conf->rss_hf = i40e_parse_hena(hena);
4750
4751         return 0;
4752 }
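
/*
 * Application-side sketch (illustrative only, assuming the generic
 * ethdev API of this era) of exercising the two callbacks above;
 * port_id is an assumption for the example:
 *
 *   uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
 *   struct rte_eth_rss_conf conf = {
 *           .rss_key = key,
 *           .rss_key_len = sizeof(key),
 *   };
 *
 *   if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) == 0) {
 *           conf.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
 *                         ETH_RSS_NONFRAG_IPV4_UDP;
 *           rte_eth_dev_rss_hash_update(port_id, &conf);
 *   }
 */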
4753
4754 static int
4755 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4756 {
4757         switch (filter_type) {
4758         case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4759                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4760                 break;
4761         case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4762                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4763                 break;
4764         case RTE_TUNNEL_FILTER_IMAC_TENID:
4765                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4766                 break;
4767         case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4768                 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4769                 break;
4770         case ETH_TUNNEL_FILTER_IMAC:
4771                 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4772                 break;
4773         default:
4774                 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4775                 return -EINVAL;
4776         }
4777
4778         return 0;
4779 }
4780
4781 static int
4782 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
4783                         struct rte_eth_tunnel_filter_conf *tunnel_filter,
4784                         uint8_t add)
4785 {
4786         uint16_t ip_type;
4787         uint8_t tun_type = 0;
4788         int val, ret = 0;
4789         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4790         struct i40e_vsi *vsi = pf->main_vsi;
4791         struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
4792         struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
4793
4794         cld_filter = rte_zmalloc("tunnel_filter",
4795                 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
4796                 0);
4797
4798         if (cld_filter == NULL) {
4799                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4800                 return -EINVAL;
4801         }
4802         pfilter = cld_filter;
4803
4804         (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
4805                         sizeof(struct ether_addr));
4806         (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
4807                         sizeof(struct ether_addr));
4808
4809         pfilter->inner_vlan = tunnel_filter->inner_vlan;
4810         if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
4811                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
4812                 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
4813                                 &tunnel_filter->ip_addr,
4814                                 sizeof(pfilter->ipaddr.v4.data));
4815         } else {
4816                 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
4817                 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
4818                                 &tunnel_filter->ip_addr,
4819                                 sizeof(pfilter->ipaddr.v6.data));
4820         }
4821
4822         /* check tunneled type */
4823         switch (tunnel_filter->tunnel_type) {
4824         case RTE_TUNNEL_TYPE_VXLAN:
4825                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
4826                 break;
4827         case RTE_TUNNEL_TYPE_NVGRE:
4828                 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
4829                 break;
4830         default:
4831                 /* Other tunnel types are not supported. */
4832                 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
4833                 rte_free(cld_filter);
4834                 return -EINVAL;
4835         }
4836
4837         val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
4838                                                 &pfilter->flags);
4839         if (val < 0) {
4840                 rte_free(cld_filter);
4841                 return -EINVAL;
4842         }
4843
4844         pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
4845                 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
4846         pfilter->tenant_id = tunnel_filter->tenant_id;
4847         pfilter->queue_number = tunnel_filter->queue_id;
4848
4849         if (add)
4850                 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
4851         else
4852                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4853                                                 cld_filter, 1);
4854
4855         rte_free(cld_filter);
4856         return ret;
4857 }
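
/*
 * Sketch of a VXLAN tunnel filter an application could submit with
 * RTE_ETH_FILTER_ADD to reach the function above (illustrative; the MAC
 * address and tenant_id, i.e. the VNI, are assumptions):
 *
 *   struct ether_addr imac = {
 *           .addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
 *   };
 *   struct rte_eth_tunnel_filter_conf tf = {
 *           .inner_mac = &imac,
 *           .filter_type = ETH_TUNNEL_FILTER_IMAC,
 *           .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
 *           .tenant_id = 100,
 *           .queue_id = 0,
 *   };
 */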
4858
4859 static int
4860 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4861 {
4862         uint8_t i;
4863
4864         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4865                 if (pf->vxlan_ports[i] == port)
4866                         return i;
4867         }
4868
4869         return -1;
4870 }
4871
4872 static int
4873 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4874 {
4875         int  idx, ret;
4876         uint8_t filter_idx;
4877         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4878
4879         idx = i40e_get_vxlan_port_idx(pf, port);
4880
4881         /* Check if port already exists */
4882         if (idx >= 0) {
4883                 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4884                 return -EINVAL;
4885         }
4886
4887         /* Now check if there is space to add the new port */
4888         idx = i40e_get_vxlan_port_idx(pf, 0);
4889         if (idx < 0) {
4890                 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
4891                         "not adding port %d", port);
4892                 return -ENOSPC;
4893         }
4894
4895         ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4896                                         &filter_idx, NULL);
4897         if (ret < 0) {
4898                 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4899                 return -1;
4900         }
4901
4902         PMD_DRV_LOG(INFO, "Added port %d with AQ command, index %d",
4903                          port, filter_idx);
4904
4905         /* New port: add it and mark its index in the bitmap */
4906         pf->vxlan_ports[idx] = port;
4907         pf->vxlan_bitmap |= (1 << idx);
4908
4909         if (!(pf->flags & I40E_FLAG_VXLAN))
4910                 pf->flags |= I40E_FLAG_VXLAN;
4911
4912         return 0;
4913 }
4914
4915 static int
4916 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
4917 {
4918         int idx;
4919         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4920
4921         if (!(pf->flags & I40E_FLAG_VXLAN)) {
4922                 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
4923                 return -EINVAL;
4924         }
4925
4926         idx = i40e_get_vxlan_port_idx(pf, port);
4927
4928         if (idx < 0) {
4929                 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
4930                 return -EINVAL;
4931         }
4932
4933         if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
4934                 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
4935                 return -1;
4936         }
4937
4938         PMD_DRV_LOG(INFO, "Deleted port %d with AQ command, index %d",
4939                         port, idx);
4940
4941         pf->vxlan_ports[idx] = 0;
4942         pf->vxlan_bitmap &= ~(1 << idx);
4943
4944         if (!pf->vxlan_bitmap)
4945                 pf->flags &= ~I40E_FLAG_VXLAN;
4946
4947         return 0;
4948 }
4949
4950 /* Add UDP tunneling port */
4951 static int
4952 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4953                         struct rte_eth_udp_tunnel *udp_tunnel)
4954 {
4955         int ret = 0;
4956         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4957
4958         if (udp_tunnel == NULL)
4959                 return -EINVAL;
4960
4961         switch (udp_tunnel->prot_type) {
4962         case RTE_TUNNEL_TYPE_VXLAN:
4963                 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
4964                 break;
4965
4966         case RTE_TUNNEL_TYPE_GENEVE:
4967         case RTE_TUNNEL_TYPE_TEREDO:
4968                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4969                 ret = -1;
4970                 break;
4971
4972         default:
4973                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4974                 ret = -1;
4975                 break;
4976         }
4977
4978         return ret;
4979 }
4980
4981 /* Remove UDP tunneling port */
4982 static int
4983 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
4984                         struct rte_eth_udp_tunnel *udp_tunnel)
4985 {
4986         int ret = 0;
4987         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4988
4989         if (udp_tunnel == NULL)
4990                 return -EINVAL;
4991
4992         switch (udp_tunnel->prot_type) {
4993         case RTE_TUNNEL_TYPE_VXLAN:
4994                 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
4995                 break;
4996         case RTE_TUNNEL_TYPE_GENEVE:
4997         case RTE_TUNNEL_TYPE_TEREDO:
4998                 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4999                 ret = -1;
5000                 break;
5001         default:
5002                 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5003                 ret = -1;
5004                 break;
5005         }
5006
5007         return ret;
5008 }
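
/*
 * Illustrative application usage of the two handlers above (a sketch,
 * assuming the rte_eth_dev_udp_tunnel_add()/_delete() wrappers of this
 * era; port_id is an assumption, 4789 is the IANA-assigned VXLAN port):
 *
 *   struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *
 *   rte_eth_dev_udp_tunnel_add(port_id, &tunnel);
 *   ...
 *   rte_eth_dev_udp_tunnel_delete(port_id, &tunnel);
 */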
5009
5010 /* Calculate the maximum number of contiguous PF queues that are configured */
5011 static int
5012 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
5013 {
5014         struct rte_eth_dev_data *data = pf->dev_data;
5015         int i, num;
5016         struct i40e_rx_queue *rxq;
5017
5018         num = 0;
5019         for (i = 0; i < pf->lan_nb_qps; i++) {
5020                 rxq = data->rx_queues[i];
5021                 if (rxq && rxq->q_set)
5022                         num++;
5023                 else
5024                         break;
5025         }
5026
5027         return num;
5028 }
5029
5030 /* Configure RSS */
5031 static int
5032 i40e_pf_config_rss(struct i40e_pf *pf)
5033 {
5034         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5035         struct rte_eth_rss_conf rss_conf;
5036         uint32_t i, lut = 0;
5037         uint16_t j, num;
5038
5039         /*
5040          * If both VMDQ and RSS are enabled, not all PF queues are configured.
5041          * It's necessary to calculate the actual number of PF queues in use.
5042          */
5043         if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
5044                 num = i40e_pf_calc_configured_queues_num(pf);
5045                 num = i40e_align_floor(num);
5046         } else
5047                 num = i40e_align_floor(pf->dev_data->nb_rx_queues);
5048
5049         PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
5050                         num);
5051
5052         if (num == 0) {
5053                 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
5054                 return -ENOTSUP;
5055         }
5056
5057         for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
5058                 if (j == num)
5059                         j = 0;
5060                 lut = (lut << 8) | (j & ((0x1 <<
5061                         hw->func_caps.rss_table_entry_width) - 1));
5062                 if ((i & 3) == 3)
5063                         I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
5064         }
5065
5066         rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
5067         if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
5068                 i40e_pf_disable_rss(pf);
5069                 return 0;
5070         }
5071         if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
5072                 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
5073                 /* Random default keys */
5074                 static uint32_t rss_key_default[] = {0x6b793944,
5075                         0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
5076                         0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
5077                         0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
5078
5079                 rss_conf.rss_key = (uint8_t *)rss_key_default;
5080                 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5081                                                         sizeof(uint32_t);
5082         }
5083
5084         return i40e_hw_rss_hash_set(hw, &rss_conf);
5085 }
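
/*
 * Worked example of the LUT fill above, assuming num = 4 configured
 * queues: j cycles 0,1,2,3,0,... and every fourth iteration the four
 * accumulated entries are flushed as one 32-bit register, so entries
 * {0,1,2,3} are written to I40E_PFQF_HLUT(0) as 0x00010203 (entry 0 in
 * the most significant byte).
 */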
5086
5087 static int
5088 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
5089                         struct rte_eth_tunnel_filter_conf *filter)
5090 {
5091         if (pf == NULL || filter == NULL) {
5092                 PMD_DRV_LOG(ERR, "Invalid parameter");
5093                 return -EINVAL;
5094         }
5095
5096         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
5097                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5098                 return -EINVAL;
5099         }
5100
5101         if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
5102                 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
5103                 return -EINVAL;
5104         }
5105
5106         if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
5107                 (is_zero_ether_addr(filter->outer_mac))) {
5108                 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
5109                 return -EINVAL;
5110         }
5111
5112         if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
5113                 (is_zero_ether_addr(filter->inner_mac))) {
5114                 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
5115                 return -EINVAL;
5116         }
5117
5118         return 0;
5119 }
5120
5121 static int
5122 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5123                         void *arg)
5124 {
5125         struct rte_eth_tunnel_filter_conf *filter;
5126         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5127         int ret = I40E_SUCCESS;
5128
5129         filter = (struct rte_eth_tunnel_filter_conf *)(arg);
5130
5131         if (i40e_tunnel_filter_param_check(pf, filter) < 0)
5132                 return I40E_ERR_PARAM;
5133
5134         switch (filter_op) {
5135         case RTE_ETH_FILTER_NOP:
5136                 if (!(pf->flags & I40E_FLAG_VXLAN))
5137                         ret = I40E_NOT_SUPPORTED;
                     break;
5138         case RTE_ETH_FILTER_ADD:
5139                 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
5140                 break;
5141         case RTE_ETH_FILTER_DELETE:
5142                 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
5143                 break;
5144         default:
5145                 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
5146                 ret = I40E_ERR_PARAM;
5147                 break;
5148         }
5149
5150         return ret;
5151 }
5152
5153 static int
5154 i40e_pf_config_mq_rx(struct i40e_pf *pf)
5155 {
5156         int ret = 0;
5157         enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
5158
5159         if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
5160                 PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
5161                 return -ENOTSUP;
5162         }
5163
5164         /* RSS setup */
5165         if (mq_mode & ETH_MQ_RX_RSS_FLAG)
5166                 ret = i40e_pf_config_rss(pf);
5167         else
5168                 i40e_pf_disable_rss(pf);
5169
5170         return ret;
5171 }
5172
5173 /* Get the symmetric hash enable configurations per port */
5174 static void
5175 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
5176 {
5177         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5178
5179         *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
5180 }
5181
5182 /* Set the symmetric hash enable configurations per port */
5183 static void
5184 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
5185 {
5186         uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5187
5188         if (enable > 0) {
5189                 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
5190                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5191                                                         "been enabled");
5192                         return;
5193                 }
5194                 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5195         } else {
5196                 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
5197                         PMD_DRV_LOG(INFO, "Symmetric hash has already "
5198                                                         "been disabled");
5199                         return;
5200                 }
5201                 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5202         }
5203         I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
5204         I40E_WRITE_FLUSH(hw);
5205 }
5206
5207 /*
5208  * Get global configurations of hash function type and symmetric hash enable
5209  * per flow type (pctype). Note that global configuration means it affects all
5210  * the ports on the same NIC.
5211  */
5212 static int
5213 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
5214                                    struct rte_eth_hash_global_conf *g_cfg)
5215 {
5216         uint32_t reg, mask = I40E_FLOW_TYPES;
5217         uint16_t i;
5218         enum i40e_filter_pctype pctype;
5219
5220         memset(g_cfg, 0, sizeof(*g_cfg));
5221         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5222         if (reg & I40E_GLQF_CTL_HTOEP_MASK)
5223                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
5224         else
5225                 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
5226         PMD_DRV_LOG(DEBUG, "Hash function is %s",
5227                 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
5228
5229         for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
5230                 if (!(mask & (1UL << i)))
5231                         continue;
5232                 mask &= ~(1UL << i);
5233                 /* A set bit indicates the corresponding flow type is supported */
5234                 g_cfg->valid_bit_mask[0] |= (1UL << i);
5235                 pctype = i40e_flowtype_to_pctype(i);
5236                 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
5237                 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
5238                         g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
5239         }
5240
5241         return 0;
5242 }
5243
5244 static int
5245 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
5246 {
5247         uint32_t i;
5248         uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
5249
5250         if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
5251                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5252                 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
5253                 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
5254                                                 g_cfg->hash_func);
5255                 return -EINVAL;
5256         }
5257
5258         /*
5259          * As i40e supports fewer than 32 flow types, only the first 32 bits
5260          * need to be checked.
5261          */
5262         mask0 = g_cfg->valid_bit_mask[0];
5263         for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
5264                 if (i == 0) {
5265                         /* Check if any unsupported flow type configured */
5266                         if ((mask0 | i40e_mask) ^ i40e_mask)
5267                                 goto mask_err;
5268                 } else {
5269                         if (g_cfg->valid_bit_mask[i])
5270                                 goto mask_err;
5271                 }
5272         }
5273
5274         return 0;
5275
5276 mask_err:
5277         PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
5278
5279         return -EINVAL;
5280 }
5281
5282 /*
5283  * Set global configurations of hash function type and symmetric hash enable
5284  * per flow type (pctype). Note any modifying global configuration will affect
5285  * all the ports on the same NIC.
5286  */
5287 static int
5288 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
5289                                    struct rte_eth_hash_global_conf *g_cfg)
5290 {
5291         int ret;
5292         uint16_t i;
5293         uint32_t reg;
5294         uint32_t mask0 = g_cfg->valid_bit_mask[0];
5295         enum i40e_filter_pctype pctype;
5296
5297         /* Check the input parameters */
5298         ret = i40e_hash_global_config_check(g_cfg);
5299         if (ret < 0)
5300                 return ret;
5301
5302         for (i = 0; mask0 && i < UINT32_BIT; i++) {
5303                 if (!(mask0 & (1UL << i)))
5304                         continue;
5305                 mask0 &= ~(1UL << i);
5306                 pctype = i40e_flowtype_to_pctype(i);
5307                 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
5308                                 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
5309                 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
5310         }
5311
5312         reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5313         if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
5314                 /* Toeplitz */
5315                 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
5316                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5317                                                                 "Toeplitz");
5318                         goto out;
5319                 }
5320                 reg |= I40E_GLQF_CTL_HTOEP_MASK;
5321         } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
5322                 /* Simple XOR */
5323                 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
5324                         PMD_DRV_LOG(DEBUG, "Hash function already set to "
5325                                                         "Simple XOR");
5326                         goto out;
5327                 }
5328                 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
5329         } else
5330                 /* Use the default, and keep it as it is */
5331                 goto out;
5332
5333         I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
5334
5335 out:
5336         I40E_WRITE_FLUSH(hw);
5337
5338         return 0;
5339 }
5340
5341 static int
5342 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5343 {
5344         int ret = 0;
5345
5346         if (!hw || !info) {
5347                 PMD_DRV_LOG(ERR, "Invalid pointer");
5348                 return -EFAULT;
5349         }
5350
5351         switch (info->info_type) {
5352         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5353                 i40e_get_symmetric_hash_enable_per_port(hw,
5354                                         &(info->info.enable));
5355                 break;
5356         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5357                 ret = i40e_get_hash_filter_global_config(hw,
5358                                 &(info->info.global_conf));
5359                 break;
5360         default:
5361                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5362                                                         info->info_type);
5363                 ret = -EINVAL;
5364                 break;
5365         }
5366
5367         return ret;
5368 }
5369
5370 static int
5371 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5372 {
5373         int ret = 0;
5374
5375         if (!hw || !info) {
5376                 PMD_DRV_LOG(ERR, "Invalid pointer");
5377                 return -EFAULT;
5378         }
5379
5380         switch (info->info_type) {
5381         case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5382                 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
5383                 break;
5384         case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5385                 ret = i40e_set_hash_filter_global_config(hw,
5386                                 &(info->info.global_conf));
5387                 break;
5388         default:
5389                 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5390                                                         info->info_type);
5391                 ret = -EINVAL;
5392                 break;
5393         }
5394
5395         return ret;
5396 }
5397
5398 /* Operations for hash function */
5399 static int
5400 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
5401                       enum rte_filter_op filter_op,
5402                       void *arg)
5403 {
5404         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5405         int ret = 0;
5406
5407         switch (filter_op) {
5408         case RTE_ETH_FILTER_NOP:
5409                 break;
5410         case RTE_ETH_FILTER_GET:
5411                 ret = i40e_hash_filter_get(hw,
5412                         (struct rte_eth_hash_filter_info *)arg);
5413                 break;
5414         case RTE_ETH_FILTER_SET:
5415                 ret = i40e_hash_filter_set(hw,
5416                         (struct rte_eth_hash_filter_info *)arg);
5417                 break;
5418         default:
5419                 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
5420                                                                 filter_op);
5421                 ret = -ENOTSUP;
5422                 break;
5423         }
5424
5425         return ret;
5426 }
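
/*
 * Usage sketch (illustrative only, not driver code): an application reaches
 * the hash filter handlers above through the generic filter API. Assuming a
 * valid port_id, enabling symmetric hashing per port could look like:
 *
 *	struct rte_eth_hash_filter_info info;
 *	int ret;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
 *	info.info.enable = 1;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
 *				      RTE_ETH_FILTER_SET, &info);
 */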
5427
/*
 * Configure an ethertype filter, which can direct packets by filtering
 * on MAC address and ether_type together, or on ether_type alone.
 */
5432 static int
5433 i40e_ethertype_filter_set(struct i40e_pf *pf,
5434                         struct rte_eth_ethertype_filter *filter,
5435                         bool add)
5436 {
5437         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5438         struct i40e_control_filter_stats stats;
5439         uint16_t flags = 0;
5440         int ret;
5441
5442         if (filter->queue >= pf->dev_data->nb_rx_queues) {
5443                 PMD_DRV_LOG(ERR, "Invalid queue ID");
5444                 return -EINVAL;
5445         }
5446         if (filter->ether_type == ETHER_TYPE_IPv4 ||
5447                 filter->ether_type == ETHER_TYPE_IPv6) {
                PMD_DRV_LOG(ERR, "unsupported ether_type (0x%04x) in"
                        " control packet filter.", filter->ether_type);
                return -EINVAL;
        }
        if (filter->ether_type == ETHER_TYPE_VLAN)
                PMD_DRV_LOG(WARNING, "filtering on the VLAN ether_type in the"
                        " first tag is not supported.");
5455
5456         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5457                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5458         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5459                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5460         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5461
5462         memset(&stats, 0, sizeof(stats));
5463         ret = i40e_aq_add_rem_control_packet_filter(hw,
5464                         filter->mac_addr.addr_bytes,
5465                         filter->ether_type, flags,
5466                         pf->main_vsi->seid,
5467                         filter->queue, add, &stats, NULL);
5468
        PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
                         " mac_etype_used = %u, etype_used = %u,"
                         " mac_etype_free = %u, etype_free = %u",
                         ret, stats.mac_etype_used, stats.etype_used,
                         stats.mac_etype_free, stats.etype_free);
5474         if (ret < 0)
5475                 return -ENOSYS;
5476         return 0;
5477 }
5478
5479 /*
5480  * Handle operations for ethertype filter.
5481  */
5482 static int
5483 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
5484                                 enum rte_filter_op filter_op,
5485                                 void *arg)
5486 {
5487         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5488         int ret = 0;
5489
5490         if (filter_op == RTE_ETH_FILTER_NOP)
5491                 return ret;
5492
5493         if (arg == NULL) {
5494                 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5495                             filter_op);
5496                 return -EINVAL;
5497         }
5498
5499         switch (filter_op) {
5500         case RTE_ETH_FILTER_ADD:
5501                 ret = i40e_ethertype_filter_set(pf,
5502                         (struct rte_eth_ethertype_filter *)arg,
5503                         TRUE);
5504                 break;
5505         case RTE_ETH_FILTER_DELETE:
5506                 ret = i40e_ethertype_filter_set(pf,
5507                         (struct rte_eth_ethertype_filter *)arg,
5508                         FALSE);
5509                 break;
5510         default:
                PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5512                 ret = -ENOSYS;
5513                 break;
5514         }
5515         return ret;
5516 }
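
/*
 * Usage sketch (illustrative only): dropping all frames of a given
 * EtherType, regardless of source MAC address, via the generic filter API.
 * The EtherType value (0x88F7, PTP) and queue below are example choices,
 * not anything the driver mandates:
 *
 *	struct rte_eth_ethertype_filter filter;
 *	int ret;
 *
 *	memset(&filter, 0, sizeof(filter));
 *	filter.ether_type = 0x88F7;
 *	filter.flags = RTE_ETHTYPE_FLAGS_DROP;
 *	filter.queue = 0;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				      RTE_ETH_FILTER_ADD, &filter);
 */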
5517
5518 static int
5519 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
5520                      enum rte_filter_type filter_type,
5521                      enum rte_filter_op filter_op,
5522                      void *arg)
5523 {
5524         int ret = 0;
5525
5526         if (dev == NULL)
5527                 return -EINVAL;
5528
5529         switch (filter_type) {
5530         case RTE_ETH_FILTER_HASH:
5531                 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
5532                 break;
5533         case RTE_ETH_FILTER_MACVLAN:
5534                 ret = i40e_mac_filter_handle(dev, filter_op, arg);
5535                 break;
5536         case RTE_ETH_FILTER_ETHERTYPE:
5537                 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
5538                 break;
5539         case RTE_ETH_FILTER_TUNNEL:
5540                 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
5541                 break;
5542         case RTE_ETH_FILTER_FDIR:
5543                 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
5544                 break;
5545         default:
5546                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5547                                                         filter_type);
5548                 ret = -EINVAL;
5549                 break;
5550         }
5551
5552         return ret;
5553 }
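
/*
 * Applications can probe whether a filter type is implemented before using
 * it: rte_eth_dev_filter_supported() issues RTE_ETH_FILTER_NOP through this
 * dispatcher. A sketch, assuming a previously prepared fdir_entry:
 *
 *	if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) == 0)
 *		ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *					      RTE_ETH_FILTER_ADD, &fdir_entry);
 */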
5554
/*
 * As some registers are only reset by a global hardware reset, explicit
 * hardware initialization is needed to put those registers into an
 * expected initial state.
 */
5560 static void
5561 i40e_hw_init(struct i40e_hw *hw)
5562 {
5563         /* clear the PF Queue Filter control register */
5564         I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
5565
5566         /* Disable symmetric hash per port */
5567         i40e_set_symmetric_hash_enable_per_port(hw, 0);
5568 }
5569
5570 enum i40e_filter_pctype
5571 i40e_flowtype_to_pctype(uint16_t flow_type)
5572 {
5573         static const enum i40e_filter_pctype pctype_table[] = {
5574                 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
5575                 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
5576                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5577                 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
5578                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5579                 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
5580                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5581                 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
5582                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5583                 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
5584                 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
5585                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
5586                 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
5587                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
5588                 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
5589                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
5590                 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
5591                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
5592                 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
5593         };

        /* Guard against an out-of-range flow type reading past the table;
         * 0 is a reserved PCTYPE value and serves as an invalid marker here.
         */
        if (flow_type >= RTE_DIM(pctype_table))
                return (enum i40e_filter_pctype)0;

        return pctype_table[flow_type];
5596 }
5597
5598 uint16_t
5599 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
5600 {
5601         static const uint16_t flowtype_table[] = {
5602                 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
5603                 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
5604                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
5605                 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
5606                         RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
5607                 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
5608                         RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
5609                 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
5610                         RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
5611                 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
5612                 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
5613                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
5614                 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
5615                         RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
5616                 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
5617                         RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
5618                 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
5619                         RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
5620                 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
5621         };

        /* Guard against an out-of-range PCTYPE reading past the table */
        if ((unsigned int)pctype >= RTE_DIM(flowtype_table))
                return RTE_ETH_FLOW_UNKNOWN;

        return flowtype_table[pctype];
5624 }
5625
/*
 * On X710, performance numbers are far below expectations on recent firmware
 * versions. The same is true on XL710 if promiscuous mode is disabled, or if
 * promiscuous mode is enabled and the port MAC address equals the packet's
 * destination MAC address. The fix for this issue may not be integrated into
 * the following firmware version, so a workaround in the software driver is
 * needed. It modifies the initial values of 3 internal-only registers on both
 * X710 and XL710. Note that the values for X710 and XL710 could differ, and
 * the workaround can be removed once the issue is fixed in firmware.
 */
5637
5638 /* For both X710 and XL710 */
5639 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
5640 #define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00
5641
5642 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
5643 #define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08
5644
5645 /* For X710 */
5646 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
5647 /* For XL710 */
5648 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
5649 #define I40E_GL_SWR_PM_UP_THR            0x269FBC
5650
5651 static void
5652 i40e_configure_registers(struct i40e_hw *hw)
5653 {
5654         static struct {
5655                 uint32_t addr;
5656                 uint64_t val;
5657         } reg_table[] = {
5658                 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
5659                 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
5660                 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
5661         };
5662         uint64_t reg;
5663         uint32_t i;
5664         int ret;
5665
5666         for (i = 0; i < RTE_DIM(reg_table); i++) {
5667                 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
5668                         if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
5669                                 reg_table[i].val =
5670                                         I40E_GL_SWR_PM_UP_THR_SF_VALUE;
5671                         else /* For X710 */
5672                                 reg_table[i].val =
5673                                         I40E_GL_SWR_PM_UP_THR_EF_VALUE;
5674                 }
5675
5676                 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
5677                                                         &reg, NULL);
5678                 if (ret < 0) {
5679                         PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
5680                                                         reg_table[i].addr);
5681                         break;
5682                 }
5683                 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
5684                                                 reg_table[i].addr, reg);
5685                 if (reg == reg_table[i].val)
5686                         continue;
5687
5688                 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
5689                                                 reg_table[i].val, NULL);
5690                 if (ret < 0) {
                        PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to "
                                "address 0x%"PRIx32, reg_table[i].val,
                                                        reg_table[i].addr);
                        break;
                }
                PMD_DRV_LOG(DEBUG, "Wrote 0x%"PRIx64" to address "
                        "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
5698         }
5699 }