4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
52 #include <rte_eth_ctrl.h>
54 #include "i40e_logs.h"
55 #include "base/i40e_prototype.h"
56 #include "base/i40e_adminq_cmd.h"
57 #include "base/i40e_type.h"
58 #include "i40e_ethdev.h"
59 #include "i40e_rxtx.h"
62 /* Maximum number of MAC addresses */
63 #define I40E_NUM_MACADDR_MAX 64
64 #define I40E_CLEAR_PXE_WAIT_MS 200
67 /* Maximum number of capability elements */
67 #define I40E_MAX_CAP_ELE_NUM 128
69 /* Wait count and interval */
70 #define I40E_CHK_Q_ENA_COUNT 1000
71 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
73 /* Maximum number of VSIs */
74 #define I40E_MAX_NUM_VSIS (384UL)
76 /* Default queue interrupt throttling time in microseconds */
77 #define I40E_ITR_INDEX_DEFAULT 0
78 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
79 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
81 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
83 /* Mask of PF interrupt causes */
84 #define I40E_PFINT_ICR0_ENA_MASK ( \
85 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
86 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
87 I40E_PFINT_ICR0_ENA_GRST_MASK | \
88 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
89 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
90 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
91 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
92 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
93 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
94 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
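/*
 * These are the non-queue ("other") interrupt causes routed to MSI-X
 * vector 0: admin queue events, link status changes, VFLR, and the
 * ECC/HMC/storm/PCI error conditions. i40e_pf_config_irq0() programs this
 * mask and i40e_dev_interrupt_handler() services the causes when they fire.
 */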
96 #define I40E_FLOW_TYPES ( \
97 (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
98 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
99 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
100 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
101 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
102 (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
103 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
104 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
105 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
106 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
107 (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
109 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
110 static int i40e_dev_configure(struct rte_eth_dev *dev);
111 static int i40e_dev_start(struct rte_eth_dev *dev);
112 static void i40e_dev_stop(struct rte_eth_dev *dev);
113 static void i40e_dev_close(struct rte_eth_dev *dev);
114 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
115 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
116 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
117 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
118 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
119 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
120 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
121 struct rte_eth_stats *stats);
122 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
123 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
127 static void i40e_dev_info_get(struct rte_eth_dev *dev,
128 struct rte_eth_dev_info *dev_info);
129 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
132 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
133 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
134 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
137 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
138 static int i40e_dev_led_on(struct rte_eth_dev *dev);
139 static int i40e_dev_led_off(struct rte_eth_dev *dev);
140 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
141 struct rte_eth_fc_conf *fc_conf);
142 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
143 struct rte_eth_pfc_conf *pfc_conf);
144 static void i40e_macaddr_add(struct rte_eth_dev *dev,
145 struct ether_addr *mac_addr,
148 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
149 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
150 struct rte_eth_rss_reta_entry64 *reta_conf,
152 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
153 struct rte_eth_rss_reta_entry64 *reta_conf,
156 static int i40e_get_cap(struct i40e_hw *hw);
157 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
158 static int i40e_pf_setup(struct i40e_pf *pf);
159 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
160 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
161 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
162 bool offset_loaded, uint64_t *offset, uint64_t *stat);
163 static void i40e_stat_update_48(struct i40e_hw *hw,
169 static void i40e_pf_config_irq0(struct i40e_hw *hw);
170 static void i40e_dev_interrupt_handler(
171 __rte_unused struct rte_intr_handle *handle, void *param);
172 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
173 uint32_t base, uint32_t num);
174 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
175 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
177 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
179 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
180 static int i40e_veb_release(struct i40e_veb *veb);
181 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
182 struct i40e_vsi *vsi);
183 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
184 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
185 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
186 struct i40e_macvlan_filter *mv_f,
188 struct ether_addr *addr);
189 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
190 struct i40e_macvlan_filter *mv_f,
193 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
194 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
195 struct rte_eth_rss_conf *rss_conf);
196 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
197 struct rte_eth_rss_conf *rss_conf);
198 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
199 struct rte_eth_udp_tunnel *udp_tunnel);
200 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
201 struct rte_eth_udp_tunnel *udp_tunnel);
202 static int i40e_ethertype_filter_set(struct i40e_pf *pf,
203 struct rte_eth_ethertype_filter *filter,
205 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
206 enum rte_filter_op filter_op,
208 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
209 enum rte_filter_type filter_type,
210 enum rte_filter_op filter_op,
212 static void i40e_configure_registers(struct i40e_hw *hw);
213 static void i40e_hw_init(struct i40e_hw *hw);
215 static const struct rte_pci_id pci_id_i40e_map[] = {
216 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
217 #include "rte_pci_dev_ids.h"
218 { .vendor_id = 0, /* sentinel */ },
221 static const struct eth_dev_ops i40e_eth_dev_ops = {
222 .dev_configure = i40e_dev_configure,
223 .dev_start = i40e_dev_start,
224 .dev_stop = i40e_dev_stop,
225 .dev_close = i40e_dev_close,
226 .promiscuous_enable = i40e_dev_promiscuous_enable,
227 .promiscuous_disable = i40e_dev_promiscuous_disable,
228 .allmulticast_enable = i40e_dev_allmulticast_enable,
229 .allmulticast_disable = i40e_dev_allmulticast_disable,
230 .dev_set_link_up = i40e_dev_set_link_up,
231 .dev_set_link_down = i40e_dev_set_link_down,
232 .link_update = i40e_dev_link_update,
233 .stats_get = i40e_dev_stats_get,
234 .stats_reset = i40e_dev_stats_reset,
235 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
236 .dev_infos_get = i40e_dev_info_get,
237 .vlan_filter_set = i40e_vlan_filter_set,
238 .vlan_tpid_set = i40e_vlan_tpid_set,
239 .vlan_offload_set = i40e_vlan_offload_set,
240 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
241 .vlan_pvid_set = i40e_vlan_pvid_set,
242 .rx_queue_start = i40e_dev_rx_queue_start,
243 .rx_queue_stop = i40e_dev_rx_queue_stop,
244 .tx_queue_start = i40e_dev_tx_queue_start,
245 .tx_queue_stop = i40e_dev_tx_queue_stop,
246 .rx_queue_setup = i40e_dev_rx_queue_setup,
247 .rx_queue_release = i40e_dev_rx_queue_release,
248 .rx_queue_count = i40e_dev_rx_queue_count,
249 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
250 .tx_queue_setup = i40e_dev_tx_queue_setup,
251 .tx_queue_release = i40e_dev_tx_queue_release,
252 .dev_led_on = i40e_dev_led_on,
253 .dev_led_off = i40e_dev_led_off,
254 .flow_ctrl_set = i40e_flow_ctrl_set,
255 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
256 .mac_addr_add = i40e_macaddr_add,
257 .mac_addr_remove = i40e_macaddr_remove,
258 .reta_update = i40e_dev_rss_reta_update,
259 .reta_query = i40e_dev_rss_reta_query,
260 .rss_hash_update = i40e_dev_rss_hash_update,
261 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
262 .udp_tunnel_add = i40e_dev_udp_tunnel_add,
263 .udp_tunnel_del = i40e_dev_udp_tunnel_del,
264 .filter_ctrl = i40e_dev_filter_ctrl,
267 static struct eth_driver rte_i40e_pmd = {
269 .name = "rte_i40e_pmd",
270 .id_table = pci_id_i40e_map,
271 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
273 .eth_dev_init = eth_i40e_dev_init,
274 .dev_private_size = sizeof(struct i40e_adapter),
278 i40e_align_floor(int n)
282 return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
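/*
 * i40e_align_floor() rounds a positive value down to the nearest power of
 * two by locating the most significant set bit with __builtin_clz(), e.g.
 * 6 -> 4, 8 -> 8, 100 -> 64. Note that __builtin_clz(0) is undefined, so a
 * zero input needs a separate guard.
 */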
286 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
287 struct rte_eth_link *link)
289 struct rte_eth_link *dst = link;
290 struct rte_eth_link *src = &(dev->data->dev_link);
292 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
293 *(uint64_t *)src) == 0)
300 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
301 struct rte_eth_link *link)
303 struct rte_eth_link *dst = &(dev->data->dev_link);
304 struct rte_eth_link *src = link;
306 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
307 *(uint64_t *)src) == 0)
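/*
 * The two helpers above copy struct rte_eth_link in a single 64-bit
 * compare-and-swap, so readers never observe a half-updated mix of link
 * speed, duplex and status; when rte_atomic64_cmpset() returns 0 the
 * destination changed underneath us and the copy did not take effect.
 */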
314 * Driver initialization routine.
315 * Invoked once at EAL init time.
316 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
319 rte_i40e_pmd_init(const char *name __rte_unused,
320 const char *params __rte_unused)
322 PMD_INIT_FUNC_TRACE();
323 rte_eth_driver_register(&rte_i40e_pmd);
328 static struct rte_driver rte_i40e_driver = {
330 .init = rte_i40e_pmd_init,
333 PMD_REGISTER_DRIVER(rte_i40e_driver);
336 * Initialize registers for flexible payload, which should be set by NVM.
337 * This should be removed from code once it is fixed in NVM.
339 #ifndef I40E_GLQF_ORT
340 #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
342 #ifndef I40E_GLQF_PIT
343 #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
346 static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
348 I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
349 I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
350 I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
351 I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
352 I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
353 I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
354 I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
355 I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
356 I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
357 I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
359 /* GLQF_PIT Registers */
360 I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
361 I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
365 eth_i40e_dev_init(struct rte_eth_dev *dev)
367 struct rte_pci_device *pci_dev;
368 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
369 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
370 struct i40e_vsi *vsi;
375 PMD_INIT_FUNC_TRACE();
377 dev->dev_ops = &i40e_eth_dev_ops;
378 dev->rx_pkt_burst = i40e_recv_pkts;
379 dev->tx_pkt_burst = i40e_xmit_pkts;
381 /* for secondary processes, we don't initialise any further as primary
382 * has already done this work. Only check we don't need a different RX function. */
384 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
385 if (dev->data->scattered_rx)
386 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
389 pci_dev = dev->pci_dev;
390 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
391 pf->adapter->eth_dev = dev;
392 pf->dev_data = dev->data;
394 hw->back = I40E_PF_TO_ADAPTER(pf);
395 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
397 PMD_INIT_LOG(ERR, "Hardware is not available, "
398 "as address is NULL");
402 hw->vendor_id = pci_dev->id.vendor_id;
403 hw->device_id = pci_dev->id.device_id;
404 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
405 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
406 hw->bus.device = pci_dev->addr.devid;
407 hw->bus.func = pci_dev->addr.function;
409 /* Make sure all is clean before doing PF reset */
412 /* Initialize the hardware */
415 /* Reset here to make sure all is clean for each PF */
416 ret = i40e_pf_reset(hw);
418 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
422 /* Initialize the shared code (base driver) */
423 ret = i40e_init_shared_code(hw);
425 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
430 * To work around the NVM issue, initialize registers
431 * for flexible payload by software.
432 * It should be removed once issues are fixed in NVM.
434 i40e_flex_payload_reg_init(hw);
436 /* Initialize the parameters for adminq */
437 i40e_init_adminq_parameter(hw);
438 ret = i40e_init_adminq(hw);
439 if (ret != I40E_SUCCESS) {
440 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
443 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
444 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
445 hw->aq.api_maj_ver, hw->aq.api_min_ver,
446 ((hw->nvm.version >> 12) & 0xf),
447 ((hw->nvm.version >> 4) & 0xff),
448 (hw->nvm.version & 0xf), hw->nvm.eetrack);
451 ret = i40e_aq_stop_lldp(hw, true, NULL);
452 if (ret != I40E_SUCCESS) /* Failure here can be ignored */
453 PMD_INIT_LOG(INFO, "Failed to stop lldp");
456 i40e_clear_pxe_mode(hw);
459 * On X710, performance numbers are far below expectation on recent
460 * firmware versions. The fix for this issue may not be integrated in
461 * the next firmware version, so a workaround in the software driver
462 * is needed. It modifies the initial values of 3 internal-only
463 * registers. Note that the workaround can be removed when it is fixed
464 * in firmware in the future.
466 i40e_configure_registers(hw);
468 /* Get hw capabilities */
469 ret = i40e_get_cap(hw);
470 if (ret != I40E_SUCCESS) {
471 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
472 goto err_get_capabilities;
475 /* Initialize parameters for PF */
476 ret = i40e_pf_parameter_init(dev);
478 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
479 goto err_parameter_init;
482 /* Initialize the queue management */
483 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
485 PMD_INIT_LOG(ERR, "Failed to init queue pool");
486 goto err_qp_pool_init;
488 ret = i40e_res_pool_init(&pf->msix_pool, 1,
489 hw->func_caps.num_msix_vectors - 1);
491 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
492 goto err_msix_pool_init;
495 /* Initialize lan hmc */
496 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
497 hw->func_caps.num_rx_qp, 0, 0);
498 if (ret != I40E_SUCCESS) {
499 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
500 goto err_init_lan_hmc;
503 /* Configure lan hmc */
504 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
505 if (ret != I40E_SUCCESS) {
506 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
507 goto err_configure_lan_hmc;
510 /* Get and check the mac address */
511 i40e_get_mac_addr(hw, hw->mac.addr);
512 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
513 PMD_INIT_LOG(ERR, "mac address is not valid");
515 goto err_get_mac_addr;
517 /* Copy the permanent MAC address */
518 ether_addr_copy((struct ether_addr *) hw->mac.addr,
519 (struct ether_addr *) hw->mac.perm_addr);
521 /* Disable flow control */
522 hw->fc.requested_mode = I40E_FC_NONE;
523 i40e_set_fc(hw, &aq_fail, TRUE);
525 /* PF setup, which includes VSI setup */
526 ret = i40e_pf_setup(pf);
528 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
529 goto err_setup_pf_switch;
534 /* Disable double vlan by default */
535 i40e_vsi_config_double_vlan(vsi, FALSE);
537 if (!vsi->max_macaddrs)
538 len = ETHER_ADDR_LEN;
540 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
542 /* Should be after VSI initialized */
543 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
544 if (!dev->data->mac_addrs) {
545 PMD_INIT_LOG(ERR, "Failed to allocate memory "
546 "for storing mac address");
549 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
550 &dev->data->mac_addrs[0]);
552 /* initialize pf host driver to setup SRIOV resource if applicable */
553 i40e_pf_host_init(dev);
555 /* register callback func to eal lib */
556 rte_intr_callback_register(&(pci_dev->intr_handle),
557 i40e_dev_interrupt_handler, (void *)dev);
559 /* configure and enable device interrupt */
560 i40e_pf_config_irq0(hw);
561 i40e_pf_enable_irq0(hw);
563 /* enable uio intr after callback register */
564 rte_intr_enable(&(pci_dev->intr_handle));
569 i40e_vsi_release(pf->main_vsi);
572 err_configure_lan_hmc:
573 (void)i40e_shutdown_lan_hmc(hw);
575 i40e_res_pool_destroy(&pf->msix_pool);
577 i40e_res_pool_destroy(&pf->qp_pool);
580 err_get_capabilities:
581 (void)i40e_shutdown_adminq(hw);
587 i40e_dev_configure(struct rte_eth_dev *dev)
589 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
590 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
593 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
594 ret = i40e_fdir_setup(pf);
595 if (ret != I40E_SUCCESS) {
596 PMD_DRV_LOG(ERR, "Failed to setup flow director.");
599 ret = i40e_fdir_configure(dev);
601 PMD_DRV_LOG(ERR, "failed to configure fdir.");
605 i40e_fdir_teardown(pf);
607 ret = i40e_dev_init_vlan(dev);
612 * Needs to move VMDQ setting out of i40e_pf_config_mq_rx() as VMDQ and
613 * RSS setting have different requirements.
614 * The general PMD driver call sequence is NIC init, configure,
615 * rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
616 * VSI that a specific queue belongs to when VMDQ is applicable, so
617 * the VMDQ setting has to be done before rx/tx_queue_setup(); this
618 * function is therefore a good place for vmdq_setup().
619 * RSS setup needs the actual number of configured RX queues, which is
620 * only available after rx_queue_setup(), so dev_start() is a good
621 * place for the RSS setup.
623 if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
624 ret = i40e_vmdq_setup(dev);
630 i40e_fdir_teardown(pf);
635 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
637 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
638 uint16_t msix_vect = vsi->msix_intr;
641 for (i = 0; i < vsi->nb_qps; i++) {
642 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
643 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
647 if (vsi->type != I40E_VSI_SRIOV) {
648 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
649 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
653 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
654 vsi->user_param + (msix_vect - 1);
656 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
658 I40E_WRITE_FLUSH(hw);
661 static inline uint16_t
662 i40e_calc_itr_interval(int16_t interval)
664 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
665 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
667 /* Convert to hardware count, as writing each 1 represents 2 us */
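/*
 * For example, a requested interval of 8 us is written to the ITR
 * register as 4, and anything outside [0, I40E_QUEUE_ITR_INTERVAL_MAX]
 * falls back to the 32 us I40E_QUEUE_ITR_INTERVAL_DEFAULT above.
 */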
672 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
675 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
676 uint16_t msix_vect = vsi->msix_intr;
679 for (i = 0; i < vsi->nb_qps; i++)
680 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
682 /* Bind all RX queues to allocated MSIX interrupt */
683 for (i = 0; i < vsi->nb_qps; i++) {
684 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
685 I40E_QINT_RQCTL_ITR_INDX_MASK |
686 ((vsi->base_queue + i + 1) <<
687 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
688 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
689 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
691 if (i == vsi->nb_qps - 1)
692 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
693 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
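/*
 * Each RQCTL entry written above chains to the next RX queue through
 * NEXTQ_INDX (the absolute index vsi->base_queue + i + 1), so all queues
 * of the VSI hang off a single MSI-X vector as a linked list of interrupt
 * causes; the last queue terminates the list with NEXTQ_INDX_MASK.
 */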
696 /* Write first RX queue to Link list register as the head element */
697 if (vsi->type != I40E_VSI_SRIOV) {
699 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
701 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
703 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
704 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
706 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
707 msix_vect - 1), interval);
709 #ifndef I40E_GLINT_CTL
710 #define I40E_GLINT_CTL 0x0003F800
711 #define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
713 /* Disable auto-mask on enabling of all non-zero interrupts */
714 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
715 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
719 /* num_msix_vectors_vf counts irq0, which must be subtracted here */
720 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
721 vsi->user_param + (msix_vect - 1);
723 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
724 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
725 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
728 I40E_WRITE_FLUSH(hw);
732 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
734 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
735 uint16_t interval = i40e_calc_itr_interval(\
736 RTE_LIBRTE_I40E_ITR_INTERVAL);
738 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
739 I40E_PFINT_DYN_CTLN_INTENA_MASK |
740 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
741 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
742 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
746 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
748 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
750 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
753 static inline uint8_t
754 i40e_parse_link_speed(uint16_t eth_link_speed)
756 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
758 switch (eth_link_speed) {
759 case ETH_LINK_SPEED_40G:
760 link_speed = I40E_LINK_SPEED_40GB;
762 case ETH_LINK_SPEED_20G:
763 link_speed = I40E_LINK_SPEED_20GB;
765 case ETH_LINK_SPEED_10G:
766 link_speed = I40E_LINK_SPEED_10GB;
768 case ETH_LINK_SPEED_1000:
769 link_speed = I40E_LINK_SPEED_1GB;
771 case ETH_LINK_SPEED_100:
772 link_speed = I40E_LINK_SPEED_100MB;
780 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
782 enum i40e_status_code status;
783 struct i40e_aq_get_phy_abilities_resp phy_ab;
784 struct i40e_aq_set_phy_config phy_conf;
785 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
786 I40E_AQ_PHY_FLAG_PAUSE_RX |
787 I40E_AQ_PHY_FLAG_LOW_POWER;
788 const uint8_t advt = I40E_LINK_SPEED_40GB |
789 I40E_LINK_SPEED_10GB |
790 I40E_LINK_SPEED_1GB |
791 I40E_LINK_SPEED_100MB;
794 /* Skip it on 40G interfaces, as a workaround for the link issue */
795 if (i40e_is_40G_device(hw->device_id))
798 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
803 memset(&phy_conf, 0, sizeof(phy_conf));
805 /* bits 0-2 use the values from get_phy_abilities_resp */
807 abilities |= phy_ab.abilities & mask;
809 /* update abilities and speed */
810 if (abilities & I40E_AQ_PHY_AN_ENABLED)
811 phy_conf.link_speed = advt;
813 phy_conf.link_speed = force_speed;
815 phy_conf.abilities = abilities;
817 /* use get_phy_abilities_resp value for the rest */
818 phy_conf.phy_type = phy_ab.phy_type;
819 phy_conf.eee_capability = phy_ab.eee_capability;
820 phy_conf.eeer = phy_ab.eeer_val;
821 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
823 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
824 phy_ab.abilities, phy_ab.link_speed);
825 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
826 phy_conf.abilities, phy_conf.link_speed);
828 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
836 i40e_apply_link_speed(struct rte_eth_dev *dev)
839 uint8_t abilities = 0;
840 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
841 struct rte_eth_conf *conf = &dev->data->dev_conf;
843 speed = i40e_parse_link_speed(conf->link_speed);
844 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
845 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
846 abilities |= I40E_AQ_PHY_AN_ENABLED;
848 abilities |= I40E_AQ_PHY_LINK_ENABLED;
850 return i40e_phy_conf_link(hw, abilities, speed);
854 i40e_dev_start(struct rte_eth_dev *dev)
856 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
857 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
858 struct i40e_vsi *main_vsi = pf->main_vsi;
861 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
862 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
863 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
864 dev->data->dev_conf.link_duplex,
870 ret = i40e_dev_rxtx_init(pf);
871 if (ret != I40E_SUCCESS) {
872 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
876 /* Map queues with MSIX interrupt */
877 i40e_vsi_queues_bind_intr(main_vsi);
878 i40e_vsi_enable_queues_intr(main_vsi);
880 /* Map VMDQ VSI queues with MSIX interrupt */
881 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
882 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
883 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
886 /* enable FDIR MSIX interrupt */
887 if (pf->fdir.fdir_vsi) {
888 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
889 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
892 /* Enable all queues which have been configured */
893 ret = i40e_dev_switch_queues(pf, TRUE);
894 if (ret != I40E_SUCCESS) {
895 PMD_DRV_LOG(ERR, "Failed to enable VSI");
899 /* Enable receiving broadcast packets */
900 ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
901 if (ret != I40E_SUCCESS)
902 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
904 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
905 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
907 if (ret != I40E_SUCCESS)
908 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
911 /* Apply link configure */
912 ret = i40e_apply_link_speed(dev);
913 if (I40E_SUCCESS != ret) {
914 PMD_DRV_LOG(ERR, "Failed to apply link setting");
921 i40e_dev_switch_queues(pf, FALSE);
922 i40e_dev_clear_queues(dev);
928 i40e_dev_stop(struct rte_eth_dev *dev)
930 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
931 struct i40e_vsi *main_vsi = pf->main_vsi;
934 /* Disable all queues */
935 i40e_dev_switch_queues(pf, FALSE);
937 /* un-map queues with interrupt registers */
938 i40e_vsi_disable_queues_intr(main_vsi);
939 i40e_vsi_queues_unbind_intr(main_vsi);
941 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
942 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
943 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
946 if (pf->fdir.fdir_vsi) {
947 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
948 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
950 /* Clear all queues and release memory */
951 i40e_dev_clear_queues(dev);
954 i40e_dev_set_link_down(dev);
959 i40e_dev_close(struct rte_eth_dev *dev)
961 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
962 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
965 PMD_INIT_FUNC_TRACE();
969 /* Disable interrupt */
970 i40e_pf_disable_irq0(hw);
971 rte_intr_disable(&(dev->pci_dev->intr_handle));
973 /* shutdown and destroy the HMC */
974 i40e_shutdown_lan_hmc(hw);
976 /* release all the existing VSIs and VEBs */
977 i40e_fdir_teardown(pf);
978 i40e_vsi_release(pf->main_vsi);
980 /* shutdown the adminq */
981 i40e_aq_queue_shutdown(hw, true);
982 i40e_shutdown_adminq(hw);
984 i40e_res_pool_destroy(&pf->qp_pool);
985 i40e_res_pool_destroy(&pf->msix_pool);
987 /* force a PF reset to clean anything leftover */
988 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
989 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
990 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
991 I40E_WRITE_FLUSH(hw);
995 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
997 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
998 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
999 struct i40e_vsi *vsi = pf->main_vsi;
1002 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1004 if (status != I40E_SUCCESS)
1005 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
1007 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1009 if (status != I40E_SUCCESS)
1010 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1015 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
1017 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1018 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1019 struct i40e_vsi *vsi = pf->main_vsi;
1022 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1024 if (status != I40E_SUCCESS)
1025 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
1027 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1029 if (status != I40E_SUCCESS)
1030 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1034 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
1036 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1037 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1038 struct i40e_vsi *vsi = pf->main_vsi;
1041 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
1042 if (ret != I40E_SUCCESS)
1043 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
1047 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
1049 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1050 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1051 struct i40e_vsi *vsi = pf->main_vsi;
1054 if (dev->data->promiscuous == 1)
1055 return; /* must remain in all_multicast mode */
1057 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
1058 vsi->seid, FALSE, NULL);
1059 if (ret != I40E_SUCCESS)
1060 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
1064 * Set device link up.
1067 i40e_dev_set_link_up(struct rte_eth_dev *dev)
1069 /* re-apply link speed setting */
1070 return i40e_apply_link_speed(dev);
1074 * Set device link down.
1077 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
1079 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
1080 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
1081 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1083 return i40e_phy_conf_link(hw, abilities, speed);
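/*
 * Leaving I40E_AQ_PHY_LINK_ENABLED out of the abilities mask asks the
 * firmware to bring the link down; i40e_dev_set_link_up() reverses this
 * by re-applying the configured speed through i40e_apply_link_speed(),
 * which sets that flag again.
 */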
1087 i40e_dev_link_update(struct rte_eth_dev *dev,
1088 int wait_to_complete)
1090 #define CHECK_INTERVAL 100 /* 100ms */
1091 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
1092 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1093 struct i40e_link_status link_status;
1094 struct rte_eth_link link, old;
1096 unsigned rep_cnt = MAX_REPEAT_TIME;
1098 memset(&link, 0, sizeof(link));
1099 memset(&old, 0, sizeof(old));
1100 memset(&link_status, 0, sizeof(link_status));
1101 rte_i40e_dev_atomic_read_link_status(dev, &old);
1104 /* Get link status information from hardware */
1105 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
1106 if (status != I40E_SUCCESS) {
1107 link.link_speed = ETH_LINK_SPEED_100;
1108 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1109 PMD_DRV_LOG(ERR, "Failed to get link info");
1113 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
1114 if (!wait_to_complete)
1117 rte_delay_ms(CHECK_INTERVAL);
1118 } while (!link.link_status && rep_cnt--);
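/*
 * With wait_to_complete set, the loop above re-queries the firmware every
 * CHECK_INTERVAL (100 ms) for up to MAX_REPEAT_TIME iterations, i.e. about
 * one second, waiting for the link to come up; otherwise a single query
 * result is reported as-is.
 */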
1120 if (!link.link_status)
1123 /* i40e uses full duplex only */
1124 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1126 /* Parse the link status */
1127 switch (link_status.link_speed) {
1128 case I40E_LINK_SPEED_100MB:
1129 link.link_speed = ETH_LINK_SPEED_100;
1131 case I40E_LINK_SPEED_1GB:
1132 link.link_speed = ETH_LINK_SPEED_1000;
1134 case I40E_LINK_SPEED_10GB:
1135 link.link_speed = ETH_LINK_SPEED_10G;
1137 case I40E_LINK_SPEED_20GB:
1138 link.link_speed = ETH_LINK_SPEED_20G;
1140 case I40E_LINK_SPEED_40GB:
1141 link.link_speed = ETH_LINK_SPEED_40G;
1144 link.link_speed = ETH_LINK_SPEED_100;
1149 rte_i40e_dev_atomic_write_link_status(dev, &link);
1150 if (link.link_status == old.link_status)
1156 /* Get all the statistics of a VSI */
1158 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1160 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1161 struct i40e_eth_stats *nes = &vsi->eth_stats;
1162 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1163 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
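/*
 * The statistics registers are free-running counters that software cannot
 * clear. i40e_stat_update_32/48() read the current value (48-bit counters
 * are split across an xxxH/xxxL register pair), subtract the offset
 * captured on the first read after a reset (offset_loaded == false) and
 * account for wrap-around, so the reported values are relative to the
 * last statistics reset.
 */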
1165 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1166 vsi->offset_loaded, &oes->rx_bytes,
1168 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1169 vsi->offset_loaded, &oes->rx_unicast,
1171 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1172 vsi->offset_loaded, &oes->rx_multicast,
1173 &nes->rx_multicast);
1174 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1175 vsi->offset_loaded, &oes->rx_broadcast,
1176 &nes->rx_broadcast);
1177 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1178 &oes->rx_discards, &nes->rx_discards);
1179 /* GLV_REPC not supported */
1180 /* GLV_RMPC not supported */
1181 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1182 &oes->rx_unknown_protocol,
1183 &nes->rx_unknown_protocol);
1184 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1185 vsi->offset_loaded, &oes->tx_bytes,
1187 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1188 vsi->offset_loaded, &oes->tx_unicast,
1190 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1191 vsi->offset_loaded, &oes->tx_multicast,
1192 &nes->tx_multicast);
1193 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1194 vsi->offset_loaded, &oes->tx_broadcast,
1195 &nes->tx_broadcast);
1196 /* GLV_TDPC not supported */
1197 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1198 &oes->tx_errors, &nes->tx_errors);
1199 vsi->offset_loaded = true;
1201 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1203 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
1204 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
1205 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
1206 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
1207 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
1208 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1209 nes->rx_unknown_protocol);
1210 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
1211 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
1212 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
1213 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
1214 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
1215 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
1216 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1220 /* Get all statistics of a port */
1222 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1225 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1226 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1227 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1228 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1230 /* Get statistics of struct i40e_eth_stats */
1231 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1232 I40E_GLPRT_GORCL(hw->port),
1233 pf->offset_loaded, &os->eth.rx_bytes,
1235 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1236 I40E_GLPRT_UPRCL(hw->port),
1237 pf->offset_loaded, &os->eth.rx_unicast,
1238 &ns->eth.rx_unicast);
1239 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1240 I40E_GLPRT_MPRCL(hw->port),
1241 pf->offset_loaded, &os->eth.rx_multicast,
1242 &ns->eth.rx_multicast);
1243 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1244 I40E_GLPRT_BPRCL(hw->port),
1245 pf->offset_loaded, &os->eth.rx_broadcast,
1246 &ns->eth.rx_broadcast);
1247 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1248 pf->offset_loaded, &os->eth.rx_discards,
1249 &ns->eth.rx_discards);
1250 /* GLPRT_REPC not supported */
1251 /* GLPRT_RMPC not supported */
1252 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1254 &os->eth.rx_unknown_protocol,
1255 &ns->eth.rx_unknown_protocol);
1256 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1257 I40E_GLPRT_GOTCL(hw->port),
1258 pf->offset_loaded, &os->eth.tx_bytes,
1260 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1261 I40E_GLPRT_UPTCL(hw->port),
1262 pf->offset_loaded, &os->eth.tx_unicast,
1263 &ns->eth.tx_unicast);
1264 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1265 I40E_GLPRT_MPTCL(hw->port),
1266 pf->offset_loaded, &os->eth.tx_multicast,
1267 &ns->eth.tx_multicast);
1268 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1269 I40E_GLPRT_BPTCL(hw->port),
1270 pf->offset_loaded, &os->eth.tx_broadcast,
1271 &ns->eth.tx_broadcast);
1272 /* GLPRT_TEPC not supported */
1274 /* additional port specific stats */
1275 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1276 pf->offset_loaded, &os->tx_dropped_link_down,
1277 &ns->tx_dropped_link_down);
1278 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1279 pf->offset_loaded, &os->crc_errors,
1281 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1282 pf->offset_loaded, &os->illegal_bytes,
1283 &ns->illegal_bytes);
1284 /* GLPRT_ERRBC not supported */
1285 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1286 pf->offset_loaded, &os->mac_local_faults,
1287 &ns->mac_local_faults);
1288 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1289 pf->offset_loaded, &os->mac_remote_faults,
1290 &ns->mac_remote_faults);
1291 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1292 pf->offset_loaded, &os->rx_length_errors,
1293 &ns->rx_length_errors);
1294 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1295 pf->offset_loaded, &os->link_xon_rx,
1297 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1298 pf->offset_loaded, &os->link_xoff_rx,
1300 for (i = 0; i < 8; i++) {
1301 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1303 &os->priority_xon_rx[i],
1304 &ns->priority_xon_rx[i]);
1305 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1307 &os->priority_xoff_rx[i],
1308 &ns->priority_xoff_rx[i]);
1310 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1311 pf->offset_loaded, &os->link_xon_tx,
1313 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1314 pf->offset_loaded, &os->link_xoff_tx,
1316 for (i = 0; i < 8; i++) {
1317 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1319 &os->priority_xon_tx[i],
1320 &ns->priority_xon_tx[i]);
1321 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1323 &os->priority_xoff_tx[i],
1324 &ns->priority_xoff_tx[i]);
1325 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1327 &os->priority_xon_2_xoff[i],
1328 &ns->priority_xon_2_xoff[i]);
1330 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1331 I40E_GLPRT_PRC64L(hw->port),
1332 pf->offset_loaded, &os->rx_size_64,
1334 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1335 I40E_GLPRT_PRC127L(hw->port),
1336 pf->offset_loaded, &os->rx_size_127,
1338 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1339 I40E_GLPRT_PRC255L(hw->port),
1340 pf->offset_loaded, &os->rx_size_255,
1342 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1343 I40E_GLPRT_PRC511L(hw->port),
1344 pf->offset_loaded, &os->rx_size_511,
1346 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1347 I40E_GLPRT_PRC1023L(hw->port),
1348 pf->offset_loaded, &os->rx_size_1023,
1350 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1351 I40E_GLPRT_PRC1522L(hw->port),
1352 pf->offset_loaded, &os->rx_size_1522,
1354 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1355 I40E_GLPRT_PRC9522L(hw->port),
1356 pf->offset_loaded, &os->rx_size_big,
1358 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1359 pf->offset_loaded, &os->rx_undersize,
1361 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1362 pf->offset_loaded, &os->rx_fragments,
1364 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1365 pf->offset_loaded, &os->rx_oversize,
1367 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1368 pf->offset_loaded, &os->rx_jabber,
1370 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1371 I40E_GLPRT_PTC64L(hw->port),
1372 pf->offset_loaded, &os->tx_size_64,
1374 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1375 I40E_GLPRT_PTC127L(hw->port),
1376 pf->offset_loaded, &os->tx_size_127,
1378 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1379 I40E_GLPRT_PTC255L(hw->port),
1380 pf->offset_loaded, &os->tx_size_255,
1382 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1383 I40E_GLPRT_PTC511L(hw->port),
1384 pf->offset_loaded, &os->tx_size_511,
1386 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1387 I40E_GLPRT_PTC1023L(hw->port),
1388 pf->offset_loaded, &os->tx_size_1023,
1390 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1391 I40E_GLPRT_PTC1522L(hw->port),
1392 pf->offset_loaded, &os->tx_size_1522,
1394 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1395 I40E_GLPRT_PTC9522L(hw->port),
1396 pf->offset_loaded, &os->tx_size_big,
1398 i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
1400 &os->fd_sb_match, &ns->fd_sb_match);
1401 /* GLPRT_MSPDC not supported */
1402 /* GLPRT_XEC not supported */
1404 pf->offset_loaded = true;
1407 i40e_update_vsi_stats(pf->main_vsi);
1409 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1410 ns->eth.rx_broadcast;
1411 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1412 ns->eth.tx_broadcast;
1413 stats->ibytes = ns->eth.rx_bytes;
1414 stats->obytes = ns->eth.tx_bytes;
1415 stats->oerrors = ns->eth.tx_errors;
1416 stats->imcasts = ns->eth.rx_multicast;
1417 stats->fdirmatch = ns->fd_sb_match;
1420 stats->ibadcrc = ns->crc_errors;
1421 stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
1422 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1423 stats->imissed = ns->eth.rx_discards;
1424 stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
1426 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1427 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
1428 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
1429 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
1430 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
1431 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
1432 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
1433 ns->eth.rx_unknown_protocol);
1434 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
1435 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
1436 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
1437 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
1438 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
1439 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
1441 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
1442 ns->tx_dropped_link_down);
1443 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
1444 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
1446 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
1447 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
1448 ns->mac_local_faults);
1449 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
1450 ns->mac_remote_faults);
1451 PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
1452 ns->rx_length_errors);
1453 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
1454 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
1455 for (i = 0; i < 8; i++) {
1456 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
1457 i, ns->priority_xon_rx[i]);
1458 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
1459 i, ns->priority_xoff_rx[i]);
1461 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
1462 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
1463 for (i = 0; i < 8; i++) {
1464 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
1465 i, ns->priority_xon_tx[i]);
1466 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
1467 i, ns->priority_xoff_tx[i]);
1468 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
1469 i, ns->priority_xon_2_xoff[i]);
1471 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
1472 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
1473 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
1474 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
1475 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
1476 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
1477 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
1478 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
1479 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
1480 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
1481 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
1482 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
1483 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
1484 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
1485 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
1486 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
1487 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
1488 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
1489 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
1490 ns->mac_short_packet_dropped);
1491 PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
1492 ns->checksum_error);
1493 PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
1494 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1497 /* Reset the statistics */
1499 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1501 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1503 /* It results in reloading the start point of each counter */
1504 pf->offset_loaded = false;
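/*
 * Since the hardware counters cannot be zeroed, the reset only marks the
 * stored offsets as stale; the next i40e_dev_stats_get() call re-captures
 * them, so counters effectively restart from zero at this point.
 */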
1508 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1509 __rte_unused uint16_t queue_id,
1510 __rte_unused uint8_t stat_idx,
1511 __rte_unused uint8_t is_rx)
1513 PMD_INIT_FUNC_TRACE();
1519 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1521 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1522 struct i40e_vsi *vsi = pf->main_vsi;
1524 dev_info->max_rx_queues = vsi->nb_qps;
1525 dev_info->max_tx_queues = vsi->nb_qps;
1526 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1527 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1528 dev_info->max_mac_addrs = vsi->max_macaddrs;
1529 dev_info->max_vfs = dev->pci_dev->max_vfs;
1530 dev_info->rx_offload_capa =
1531 DEV_RX_OFFLOAD_VLAN_STRIP |
1532 DEV_RX_OFFLOAD_IPV4_CKSUM |
1533 DEV_RX_OFFLOAD_UDP_CKSUM |
1534 DEV_RX_OFFLOAD_TCP_CKSUM;
1535 dev_info->tx_offload_capa =
1536 DEV_TX_OFFLOAD_VLAN_INSERT |
1537 DEV_TX_OFFLOAD_IPV4_CKSUM |
1538 DEV_TX_OFFLOAD_UDP_CKSUM |
1539 DEV_TX_OFFLOAD_TCP_CKSUM |
1540 DEV_TX_OFFLOAD_SCTP_CKSUM |
1541 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1542 DEV_TX_OFFLOAD_TCP_TSO;
1543 dev_info->reta_size = pf->hash_lut_size;
1544 dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
1546 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1548 .pthresh = I40E_DEFAULT_RX_PTHRESH,
1549 .hthresh = I40E_DEFAULT_RX_HTHRESH,
1550 .wthresh = I40E_DEFAULT_RX_WTHRESH,
1552 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1556 dev_info->default_txconf = (struct rte_eth_txconf) {
1558 .pthresh = I40E_DEFAULT_TX_PTHRESH,
1559 .hthresh = I40E_DEFAULT_TX_HTHRESH,
1560 .wthresh = I40E_DEFAULT_TX_WTHRESH,
1562 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1563 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1564 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
1565 ETH_TXQ_FLAGS_NOOFFLOADS,
1568 if (pf->flags & I40E_FLAG_VMDQ) {
1569 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1570 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1571 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1572 pf->max_nb_vmdq_vsi;
1573 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1574 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1575 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
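/*
 * VMDQ queues are exposed behind the PF's own queues: vmdq_queue_base
 * starts where max_rx_queues ended, and both queue totals grow by
 * vmdq_queue_num (pf->vmdq_nb_qps queue pairs per configured VMDQ VSI).
 */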
1580 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1582 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1583 struct i40e_vsi *vsi = pf->main_vsi;
1584 PMD_INIT_FUNC_TRACE();
1587 return i40e_vsi_add_vlan(vsi, vlan_id);
1589 return i40e_vsi_delete_vlan(vsi, vlan_id);
1593 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1594 __rte_unused uint16_t tpid)
1596 PMD_INIT_FUNC_TRACE();
1600 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1602 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1603 struct i40e_vsi *vsi = pf->main_vsi;
1605 if (mask & ETH_VLAN_STRIP_MASK) {
1606 /* Enable or disable VLAN stripping */
1607 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1608 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1610 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1613 if (mask & ETH_VLAN_EXTEND_MASK) {
1614 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1615 i40e_vsi_config_double_vlan(vsi, TRUE);
1617 i40e_vsi_config_double_vlan(vsi, FALSE);
1622 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1623 __rte_unused uint16_t queue,
1624 __rte_unused int on)
1626 PMD_INIT_FUNC_TRACE();
1630 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1632 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1633 struct i40e_vsi *vsi = pf->main_vsi;
1634 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1635 struct i40e_vsi_vlan_pvid_info info;
1637 memset(&info, 0, sizeof(info));
1640 info.config.pvid = pvid;
1642 info.config.reject.tagged =
1643 data->dev_conf.txmode.hw_vlan_reject_tagged;
1644 info.config.reject.untagged =
1645 data->dev_conf.txmode.hw_vlan_reject_untagged;
1648 return i40e_vsi_vlan_pvid_set(vsi, &info);
1652 i40e_dev_led_on(struct rte_eth_dev *dev)
1654 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655 uint32_t mode = i40e_led_get(hw);
1658 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1664 i40e_dev_led_off(struct rte_eth_dev *dev)
1666 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1667 uint32_t mode = i40e_led_get(hw);
1670 i40e_led_set(hw, 0, false);
1676 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1677 __rte_unused struct rte_eth_fc_conf *fc_conf)
1679 PMD_INIT_FUNC_TRACE();
1685 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1686 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1688 PMD_INIT_FUNC_TRACE();
1693 /* Add a MAC address, and update filters */
1695 i40e_macaddr_add(struct rte_eth_dev *dev,
1696 struct ether_addr *mac_addr,
1697 __rte_unused uint32_t index,
1700 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1701 struct i40e_mac_filter_info mac_filter;
1702 struct i40e_vsi *vsi;
1705 /* If VMDQ not enabled or configured, return */
1706 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
1707 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
1708 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
1713 if (pool > pf->nb_cfg_vmdq_vsi) {
1714 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1715 pool, pf->nb_cfg_vmdq_vsi);
1719 (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1720 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1725 vsi = pf->vmdq[pool - 1].vsi;
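/*
 * Pool 0 is expected to address the main VSI, while pool N (N >= 1) maps
 * to the VMDQ VSI created as pf->vmdq[N - 1] by i40e_vmdq_setup(), which
 * is why the pool index was validated against nb_cfg_vmdq_vsi above.
 */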
1727 ret = i40e_vsi_add_mac(vsi, &mac_filter);
1728 if (ret != I40E_SUCCESS) {
1729 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1734 /* Remove a MAC address, and update filters */
1736 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1738 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1739 struct i40e_vsi *vsi;
1740 struct rte_eth_dev_data *data = dev->data;
1741 struct ether_addr *macaddr;
1746 macaddr = &(data->mac_addrs[index]);
1748 pool_sel = dev->data->mac_pool_sel[index];
1750 for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1751 if (pool_sel & (1ULL << i)) {
1755 /* No VMDQ pool enabled or configured */
1756 if (!(pf->flags & I40E_FLAG_VMDQ) ||
1757 (i > pf->nb_cfg_vmdq_vsi)) {
1758 PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1762 vsi = pf->vmdq[i - 1].vsi;
1764 ret = i40e_vsi_delete_mac(vsi, macaddr);
1767 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1774 /* Set perfect match or hash match of MAC and VLAN for a VF */
1776 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1777 struct rte_eth_mac_filter *filter,
1781 struct i40e_mac_filter_info mac_filter;
1782 struct ether_addr old_mac;
1783 struct ether_addr *new_mac;
1784 struct i40e_pf_vf *vf = NULL;
1789 PMD_DRV_LOG(ERR, "Invalid PF argument.");
1792 hw = I40E_PF_TO_HW(pf);
1794 if (filter == NULL) {
1795 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
1799 new_mac = &filter->mac_addr;
1801 if (is_zero_ether_addr(new_mac)) {
1802 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
1806 vf_id = filter->dst_id;
1808 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
1809 PMD_DRV_LOG(ERR, "Invalid argument.");
1812 vf = &pf->vfs[vf_id];
1814 if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1815 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
1820 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1821 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1823 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1826 mac_filter.filter_type = filter->filter_type;
1827 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1828 if (ret != I40E_SUCCESS) {
1829 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
1832 ether_addr_copy(new_mac, &pf->dev_addr);
1834 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1836 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1837 if (ret != I40E_SUCCESS) {
1838 PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
1842 /* Clear device address as it has been removed */
1843 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1844 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1850 /* MAC filter handle */
1852 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1855 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1856 struct rte_eth_mac_filter *filter;
1857 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1858 int ret = I40E_NOT_SUPPORTED;
1860 filter = (struct rte_eth_mac_filter *)(arg);
1862 switch (filter_op) {
1863 case RTE_ETH_FILTER_NOP:
1866 case RTE_ETH_FILTER_ADD:
1867 i40e_pf_disable_irq0(hw);
1869 ret = i40e_vf_mac_filter_set(pf, filter, 1);
1870 i40e_pf_enable_irq0(hw);
1872 case RTE_ETH_FILTER_DELETE:
1873 i40e_pf_disable_irq0(hw);
1875 ret = i40e_vf_mac_filter_set(pf, filter, 0);
1876 i40e_pf_enable_irq0(hw);
1879 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1880 ret = I40E_ERR_PARAM;
1888 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1889 struct rte_eth_rss_reta_entry64 *reta_conf,
1892 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1893 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1895 uint16_t i, j, lut_size = pf->hash_lut_size;
1896 uint16_t idx, shift;
1899 if (reta_size != lut_size ||
1900 reta_size > ETH_RSS_RETA_SIZE_512) {
1901 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1902 "(%d) doesn't match the number hardware can supported "
1903 "(%d)\n", reta_size, lut_size);
1907 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1908 idx = i / RTE_RETA_GROUP_SIZE;
1909 shift = i % RTE_RETA_GROUP_SIZE;
1910 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1914 if (mask == I40E_4_BIT_MASK)
1917 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1918 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
1919 if (mask & (0x1 << j))
1920 lut |= reta_conf[idx].reta[shift + j] <<
1923 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
1925 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
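/*
 * Note (descriptive only): each 32-bit I40E_PFQF_HLUT register packs four
 * 8-bit lookup-table entries, which is why the loop above steps by
 * I40E_4_BIT_WIDTH. When the 4-bit mask is only partially set, the current
 * register value 'l' is read back first and the unselected bytes are merged
 * in via (I40E_8_BIT_MASK << (CHAR_BIT * j)), so only the entries requested
 * in reta_conf[].mask are actually modified.
 */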
1932 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1933 struct rte_eth_rss_reta_entry64 *reta_conf,
1936 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1937 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1939 uint16_t i, j, lut_size = pf->hash_lut_size;
1940 uint16_t idx, shift;
1943 if (reta_size != lut_size ||
1944 reta_size > ETH_RSS_RETA_SIZE_512) {
1945 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
1946 "(%d) doesn't match the number hardware can supported "
1947 "(%d)\n", reta_size, lut_size);
1951 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1952 idx = i / RTE_RETA_GROUP_SIZE;
1953 shift = i % RTE_RETA_GROUP_SIZE;
1954 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1959 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1960 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
1961 if (mask & (0x1 << j))
1962 reta_conf[idx].reta[shift + j] = ((lut >>
1963 (CHAR_BIT * j)) & I40E_8_BIT_MASK);
1971 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1972 * @hw: pointer to the HW structure
1973 * @mem: pointer to mem struct to fill out
1974 * @size: size of memory requested
1975 * @alignment: what to align the allocation to
1977 enum i40e_status_code
1978 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1979 struct i40e_dma_mem *mem,
1983 static uint64_t id = 0;
1984 const struct rte_memzone *mz = NULL;
1985 char z_name[RTE_MEMZONE_NAMESIZE];
1988 return I40E_ERR_PARAM;
1991 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1992 #ifdef RTE_LIBRTE_XEN_DOM0
1993 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1996 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1999 return I40E_ERR_NO_MEMORY;
2004 #ifdef RTE_LIBRTE_XEN_DOM0
2005 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2007 mem->pa = mz->phys_addr;
2010 return I40E_SUCCESS;
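/*
 * Note (descriptive only): the RTE_LIBRTE_XEN_DOM0 branches above exist
 * because, when running as a Xen domain 0, the memzone's physical address is
 * only a pseudo-physical address and has to be translated into a machine
 * address with rte_mem_phy2mch() before the hardware can use it for DMA.
 */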
2014 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2015 * @hw: pointer to the HW structure
2016 * @mem: ptr to mem struct to free
2018 enum i40e_status_code
2019 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2020 struct i40e_dma_mem *mem)
2022 if (!mem || !mem->va)
2023 return I40E_ERR_PARAM;
2028 return I40E_SUCCESS;
2032 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2033 * @hw: pointer to the HW structure
2034 * @mem: pointer to mem struct to fill out
2035 * @size: size of memory requested
2037 enum i40e_status_code
2038 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2039 struct i40e_virt_mem *mem,
2043 return I40E_ERR_PARAM;
2046 mem->va = rte_zmalloc("i40e", size, 0);
2049 return I40E_SUCCESS;
2051 return I40E_ERR_NO_MEMORY;
2055 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2056 * @hw: pointer to the HW structure
2057 * @mem: pointer to mem struct to free
2059 enum i40e_status_code
2060 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2061 struct i40e_virt_mem *mem)
2064 return I40E_ERR_PARAM;
2069 return I40E_SUCCESS;
2073 i40e_init_spinlock_d(struct i40e_spinlock *sp)
2075 rte_spinlock_init(&sp->spinlock);
2079 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
2081 rte_spinlock_lock(&sp->spinlock);
2085 i40e_release_spinlock_d(struct i40e_spinlock *sp)
2087 rte_spinlock_unlock(&sp->spinlock);
2091 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
2097 * Get the hardware capabilities, which will be parsed
2098 * and saved into struct i40e_hw.
2101 i40e_get_cap(struct i40e_hw *hw)
2103 struct i40e_aqc_list_capabilities_element_resp *buf;
2104 uint16_t len, size = 0;
2107 /* Allocate a buffer large enough to hold the response data temporarily */
2108 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
2109 I40E_MAX_CAP_ELE_NUM;
2110 buf = rte_zmalloc("i40e", len, 0);
2112 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2113 return I40E_ERR_NO_MEMORY;
2116 /* Get, parse the capabilities and save it to hw */
2117 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
2118 i40e_aqc_opc_list_func_capabilities, NULL);
2119 if (ret != I40E_SUCCESS)
2120 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
2122 /* Free the temporary buffer after being used */
2129 i40e_pf_parameter_init(struct rte_eth_dev *dev)
2131 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2132 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2133 uint16_t sum_queues = 0, sum_vsis, left_queues;
2135 /* First check if FW supports SR-IOV */
2136 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
2137 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
2141 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2142 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2143 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2144 /* Allocate queues for pf */
2145 if (hw->func_caps.rss) {
2146 pf->flags |= I40E_FLAG_RSS;
2147 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2148 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2149 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
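/*
 * Illustrative note, assuming i40e_align_floor() rounds its argument down to
 * the nearest power of two: e.g. 48 LAN queue pairs would be trimmed to 32,
 * keeping the count consistent with the RSS table entry width used above.
 */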
2152 sum_queues = pf->lan_nb_qps;
2153 /* Default VSI is not counted in */
2155 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2157 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2158 pf->flags |= I40E_FLAG_SRIOV;
2159 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2160 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
2161 PMD_INIT_LOG(ERR, "Config VF number %u, "
2162 "max supported %u.",
2163 dev->pci_dev->max_vfs,
2164 hw->func_caps.num_vfs);
2167 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
2168 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
2169 "max support %u queues.",
2170 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2173 pf->vf_num = dev->pci_dev->max_vfs;
2174 sum_queues += pf->vf_nb_qps * pf->vf_num;
2175 sum_vsis += pf->vf_num;
2176 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2177 pf->vf_num, pf->vf_nb_qps);
2181 if (hw->func_caps.vmdq) {
2182 pf->flags |= I40E_FLAG_VMDQ;
2183 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2184 pf->max_nb_vmdq_vsi = 1;
2186 * If VMDQ is available, assume a single VSI can be created. Will adjust
2189 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2190 sum_vsis += pf->max_nb_vmdq_vsi;
2192 pf->vmdq_nb_qps = 0;
2193 pf->max_nb_vmdq_vsi = 0;
2195 pf->nb_cfg_vmdq_vsi = 0;
2197 if (hw->func_caps.fd) {
2198 pf->flags |= I40E_FLAG_FDIR;
2199 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2201 * Each flow director consumes one VSI and one queue,
2202 * but the exact usage can't be calculated predictably here.
2206 if (sum_vsis > pf->max_num_vsi ||
2207 sum_queues > hw->func_caps.num_rx_qp) {
2208 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2209 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2210 pf->max_num_vsi, sum_vsis);
2211 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2212 hw->func_caps.num_rx_qp, sum_queues);
2216 /* Adjust VMDQ setting to support as many VMs as possible */
2217 if (pf->flags & I40E_FLAG_VMDQ) {
2218 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2220 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2221 pf->max_num_vsi - sum_vsis);
2223 /* Limit the max VMDQ number to what rte_ether can support */
2224 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2227 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2228 pf->max_nb_vmdq_vsi);
2229 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2232 /* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for misc intr
2234 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2235 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2236 sum_vsis, hw->func_caps.num_msix_vectors);
2239 return I40E_SUCCESS;
2243 i40e_pf_get_switch_config(struct i40e_pf *pf)
2245 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2246 struct i40e_aqc_get_switch_config_resp *switch_config;
2247 struct i40e_aqc_switch_config_element_resp *element;
2248 uint16_t start_seid = 0, num_reported;
2251 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2252 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2253 if (!switch_config) {
2254 PMD_DRV_LOG(ERR, "Failed to allocated memory");
2258 /* Get the switch configurations */
2259 ret = i40e_aq_get_switch_config(hw, switch_config,
2260 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2261 if (ret != I40E_SUCCESS) {
2262 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2265 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2266 if (num_reported != 1) { /* The number should be 1 */
2267 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2271 /* Parse the switch configuration elements */
2272 element = &(switch_config->element[0]);
2273 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2274 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2275 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2277 PMD_DRV_LOG(INFO, "Unknown element type");
2280 rte_free(switch_config);
2286 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2289 struct pool_entry *entry;
2291 if (pool == NULL || num == 0)
2294 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2295 if (entry == NULL) {
2296 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2300 /* queue heap initialize */
2301 pool->num_free = num;
2302 pool->num_alloc = 0;
2304 LIST_INIT(&pool->alloc_list);
2305 LIST_INIT(&pool->free_list);
2307 /* Initialize element */
2311 LIST_INSERT_HEAD(&pool->free_list, entry, next);
2316 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2318 struct pool_entry *entry;
2323 LIST_FOREACH(entry, &pool->alloc_list, next) {
2324 LIST_REMOVE(entry, next);
2328 LIST_FOREACH(entry, &pool->free_list, next) {
2329 LIST_REMOVE(entry, next);
2334 pool->num_alloc = 0;
2336 LIST_INIT(&pool->alloc_list);
2337 LIST_INIT(&pool->free_list);
2341 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2344 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2345 uint32_t pool_offset;
2349 PMD_DRV_LOG(ERR, "Invalid parameter");
2353 pool_offset = base - pool->base;
2354 /* Lookup in alloc list */
2355 LIST_FOREACH(entry, &pool->alloc_list, next) {
2356 if (entry->base == pool_offset) {
2357 valid_entry = entry;
2358 LIST_REMOVE(entry, next);
2363 /* Not found, return */
2364 if (valid_entry == NULL) {
2365 PMD_DRV_LOG(ERR, "Failed to find entry");
2370 * Found it, move it to free list and try to merge.
2371 * In order to make merge easier, always sort it by qbase.
2372 * Find adjacent prev and last entries.
2375 LIST_FOREACH(entry, &pool->free_list, next) {
2376 if (entry->base > valid_entry->base) {
2384 /* Try to merge with the next one */
2386 /* Merge with next one */
2387 if (valid_entry->base + valid_entry->len == next->base) {
2388 next->base = valid_entry->base;
2389 next->len += valid_entry->len;
2390 rte_free(valid_entry);
2397 /* Merge with previous one */
2398 if (prev->base + prev->len == valid_entry->base) {
2399 prev->len += valid_entry->len;
2400 /* If it merged with the next one, remove the next node */
2402 LIST_REMOVE(valid_entry, next);
2403 rte_free(valid_entry);
2405 rte_free(valid_entry);
2411 /* No adjacent entry found to merge with, insert */
2414 LIST_INSERT_AFTER(prev, valid_entry, next);
2415 else if (next != NULL)
2416 LIST_INSERT_BEFORE(next, valid_entry, next);
2417 else /* It's empty list, insert to head */
2418 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2421 pool->num_free += valid_entry->len;
2422 pool->num_alloc -= valid_entry->len;
2428 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2431 struct pool_entry *entry, *valid_entry;
2433 if (pool == NULL || num == 0) {
2434 PMD_DRV_LOG(ERR, "Invalid parameter");
2438 if (pool->num_free < num) {
2439 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2440 num, pool->num_free);
2445 /* Look up the free list and find the best-fit entry */
2446 LIST_FOREACH(entry, &pool->free_list, next) {
2447 if (entry->len >= num) {
2449 if (entry->len == num) {
2450 valid_entry = entry;
2453 if (valid_entry == NULL || valid_entry->len > entry->len)
2454 valid_entry = entry;
2458 /* No entry found to satisfy the request, return */
2459 if (valid_entry == NULL) {
2460 PMD_DRV_LOG(ERR, "No valid entry found");
2464 * The entry has exactly the requested queue number,
2465 * remove it from the free list.
2467 if (valid_entry->len == num) {
2468 LIST_REMOVE(valid_entry, next);
2471 * The entry has more queues than requested,
2472 * create a new entry for alloc_list and adjust its
2473 * queue base and number in free_list.
2475 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2476 if (entry == NULL) {
2477 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2481 entry->base = valid_entry->base;
2483 valid_entry->base += num;
2484 valid_entry->len -= num;
2485 valid_entry = entry;
2488 /* Insert it into alloc list, not sorted */
2489 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2491 pool->num_free -= valid_entry->len;
2492 pool->num_alloc += valid_entry->len;
2494 return (valid_entry->base + pool->base);
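/*
 * Usage sketch (illustrative): queue pairs and MSIX vectors are handed out
 * through this pair of helpers, e.g.
 *
 *	base = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
 *	...
 *	i40e_res_pool_free(&pf->qp_pool, base);
 *
 * The allocator is best-fit: it either consumes a free-list entry of exactly
 * the requested length or splits the smallest entry that is large enough,
 * and i40e_res_pool_free() merges the returned range back with adjacent
 * free entries.
 */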
2498 * bitmap_is_subset - Check whether src2 is a subset of src1
2501 bitmap_is_subset(uint8_t src1, uint8_t src2)
2503 return !((src1 ^ src2) & src2);
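/*
 * Worked example: (src1 ^ src2) sets the bits where the two maps differ, and
 * masking with src2 keeps only the differing bits that are set in src2, i.e.
 * bits present in src2 but not in src1. With src1 = 0x7 and src2 = 0x5 the
 * result is 0 (subset); with src2 = 0x9 it is 0x8 (not a subset).
 */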
2507 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2509 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2511 /* If DCB is not supported, only default TC is supported */
2512 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2513 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2517 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2518 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2519 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2523 return I40E_SUCCESS;
2527 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2528 struct i40e_vsi_vlan_pvid_info *info)
2531 struct i40e_vsi_context ctxt;
2532 uint8_t vlan_flags = 0;
2535 if (vsi == NULL || info == NULL) {
2536 PMD_DRV_LOG(ERR, "invalid parameters");
2537 return I40E_ERR_PARAM;
2541 vsi->info.pvid = info->config.pvid;
2543 * If insert pvid is enabled, only tagged pkts are
2544 * allowed to be sent out.
2546 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2547 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2550 if (info->config.reject.tagged == 0)
2551 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2553 if (info->config.reject.untagged == 0)
2554 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2556 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2557 I40E_AQ_VSI_PVLAN_MODE_MASK);
2558 vsi->info.port_vlan_flags |= vlan_flags;
2559 vsi->info.valid_sections =
2560 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2561 memset(&ctxt, 0, sizeof(ctxt));
2562 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2563 ctxt.seid = vsi->seid;
2565 hw = I40E_VSI_TO_HW(vsi);
2566 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2567 if (ret != I40E_SUCCESS)
2568 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2574 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2576 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2578 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2580 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2581 if (ret != I40E_SUCCESS)
2585 PMD_DRV_LOG(ERR, "seid not valid");
2589 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2590 tc_bw_data.tc_valid_bits = enabled_tcmap;
2591 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2592 tc_bw_data.tc_bw_credits[i] =
2593 (enabled_tcmap & (1 << i)) ? 1 : 0;
2595 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2596 if (ret != I40E_SUCCESS) {
2597 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2601 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2602 sizeof(vsi->info.qs_handle));
2603 return I40E_SUCCESS;
2607 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2608 struct i40e_aqc_vsi_properties_data *info,
2609 uint8_t enabled_tcmap)
2611 int ret, total_tc = 0, i;
2612 uint16_t qpnum_per_tc, bsf, qp_idx;
2614 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2615 if (ret != I40E_SUCCESS)
2618 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2619 if (enabled_tcmap & (1 << i))
2621 vsi->enabled_tc = enabled_tcmap;
2623 /* Number of queues per enabled TC */
2624 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2625 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2626 bsf = rte_bsf32(qpnum_per_tc);
2628 /* Adjust the queue number to actual queues that can be applied */
2629 vsi->nb_qps = qpnum_per_tc * total_tc;
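/*
 * Illustrative example: with vsi->nb_qps = 64 and two TCs enabled,
 * qpnum_per_tc = 32 and bsf = rte_bsf32(32) = 5, so each enabled TC is
 * described below by its queue offset plus its queue count encoded as a
 * power of two (1 << bsf) in tc_mapping.
 */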
2632 * Configure TC and queue mapping parameters, for enabled TC,
2633 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2634 * default queue will serve it.
2637 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2638 if (vsi->enabled_tc & (1 << i)) {
2639 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2640 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2641 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2642 qp_idx += qpnum_per_tc;
2644 info->tc_mapping[i] = 0;
2647 /* Associate queue number with VSI */
2648 if (vsi->type == I40E_VSI_SRIOV) {
2649 info->mapping_flags |=
2650 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2651 for (i = 0; i < vsi->nb_qps; i++)
2652 info->queue_mapping[i] =
2653 rte_cpu_to_le_16(vsi->base_queue + i);
2655 info->mapping_flags |=
2656 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2657 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2659 info->valid_sections |=
2660 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2662 return I40E_SUCCESS;
2666 i40e_veb_release(struct i40e_veb *veb)
2668 struct i40e_vsi *vsi;
2671 if (veb == NULL || veb->associate_vsi == NULL)
2674 if (!TAILQ_EMPTY(&veb->head)) {
2675 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2679 vsi = veb->associate_vsi;
2680 hw = I40E_VSI_TO_HW(vsi);
2682 vsi->uplink_seid = veb->uplink_seid;
2683 i40e_aq_delete_element(hw, veb->seid, NULL);
2686 return I40E_SUCCESS;
2690 static struct i40e_veb *
2691 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2693 struct i40e_veb *veb;
2697 if (NULL == pf || vsi == NULL) {
2698 PMD_DRV_LOG(ERR, "veb setup failed, "
2699 "associated VSI shouldn't null");
2702 hw = I40E_PF_TO_HW(pf);
2704 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2706 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2710 veb->associate_vsi = vsi;
2711 TAILQ_INIT(&veb->head);
2712 veb->uplink_seid = vsi->uplink_seid;
2714 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2715 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2717 if (ret != I40E_SUCCESS) {
2718 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2719 hw->aq.asq_last_status);
2723 /* get statistics index */
2724 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2725 &veb->stats_idx, NULL, NULL, NULL);
2726 if (ret != I40E_SUCCESS) {
2727 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2728 hw->aq.asq_last_status);
2732 /* Get VEB bandwidth, to be implemented */
2733 /* Now that the associated vsi is bound to the VEB, set its uplink to this VEB */
2734 vsi->uplink_seid = veb->seid;
2743 i40e_vsi_release(struct i40e_vsi *vsi)
2747 struct i40e_vsi_list *vsi_list;
2749 struct i40e_mac_filter *f;
2752 return I40E_SUCCESS;
2754 pf = I40E_VSI_TO_PF(vsi);
2755 hw = I40E_VSI_TO_HW(vsi);
2757 /* VSI has child to attach, release child first */
2759 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2760 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2762 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2764 i40e_veb_release(vsi->veb);
2767 /* Remove all macvlan filters of the VSI */
2768 i40e_vsi_remove_all_macvlan_filter(vsi);
2769 TAILQ_FOREACH(f, &vsi->mac_list, next)
2772 if (vsi->type != I40E_VSI_MAIN) {
2773 /* Remove vsi from parent's sibling list */
2774 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2775 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2776 return I40E_ERR_PARAM;
2778 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2779 &vsi->sib_vsi_list, list);
2781 /* Remove all switch element of the VSI */
2782 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2783 if (ret != I40E_SUCCESS)
2784 PMD_DRV_LOG(ERR, "Failed to delete element");
2786 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2788 if (vsi->type != I40E_VSI_SRIOV)
2789 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2792 return I40E_SUCCESS;
2796 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2798 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2799 struct i40e_aqc_remove_macvlan_element_data def_filter;
2800 struct i40e_mac_filter_info filter;
2803 if (vsi->type != I40E_VSI_MAIN)
2804 return I40E_ERR_CONFIG;
2805 memset(&def_filter, 0, sizeof(def_filter));
2806 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2808 def_filter.vlan_tag = 0;
2809 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2810 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2811 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2812 if (ret != I40E_SUCCESS) {
2813 struct i40e_mac_filter *f;
2814 struct ether_addr *mac;
2816 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2818 /* It needs to add the permanent mac into mac list */
2819 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2821 PMD_DRV_LOG(ERR, "failed to allocate memory");
2822 return I40E_ERR_NO_MEMORY;
2824 mac = &f->mac_info.mac_addr;
2825 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2827 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2828 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2833 (void)rte_memcpy(&filter.mac_addr,
2834 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2835 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2836 return i40e_vsi_add_mac(vsi, &filter);
2840 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2842 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2843 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2844 struct i40e_hw *hw = &vsi->adapter->hw;
2848 memset(&bw_config, 0, sizeof(bw_config));
2849 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2850 if (ret != I40E_SUCCESS) {
2851 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2852 hw->aq.asq_last_status);
2856 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2857 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2858 &ets_sla_config, NULL);
2859 if (ret != I40E_SUCCESS) {
2860 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2861 "configuration %u", hw->aq.asq_last_status);
2865 /* The info is not stored yet, just print it out */
2866 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2867 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2868 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2869 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2870 ets_sla_config.share_credits[i]);
2871 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2872 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2873 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2874 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2883 i40e_vsi_setup(struct i40e_pf *pf,
2884 enum i40e_vsi_type type,
2885 struct i40e_vsi *uplink_vsi,
2886 uint16_t user_param)
2888 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2889 struct i40e_vsi *vsi;
2890 struct i40e_mac_filter_info filter;
2892 struct i40e_vsi_context ctxt;
2893 struct ether_addr broadcast =
2894 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2896 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2897 PMD_DRV_LOG(ERR, "VSI setup failed, "
2898 "VSI link shouldn't be NULL");
2902 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2903 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2904 "uplink VSI should be NULL");
2908 /* If uplink vsi didn't setup VEB, create one first */
2909 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2910 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2912 if (NULL == uplink_vsi->veb) {
2913 PMD_DRV_LOG(ERR, "VEB setup failed");
2918 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2920 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2923 TAILQ_INIT(&vsi->mac_list);
2925 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2926 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2927 vsi->parent_vsi = uplink_vsi;
2928 vsi->user_param = user_param;
2929 /* Allocate queues */
2930 switch (vsi->type) {
2931 case I40E_VSI_MAIN :
2932 vsi->nb_qps = pf->lan_nb_qps;
2934 case I40E_VSI_SRIOV :
2935 vsi->nb_qps = pf->vf_nb_qps;
2937 case I40E_VSI_VMDQ2:
2938 vsi->nb_qps = pf->vmdq_nb_qps;
2941 vsi->nb_qps = pf->fdir_nb_qps;
2947 * The filter status descriptor is reported in rx queue 0,
2948 * while the tx queue for fdir filter programming has no
2949 * such constraint and can use non-zero queues.
2950 * To simplify it, the FDIR vsi is chosen to use queue pair 0.
2951 * To make sure it uses queue pair 0, queue allocation
2952 * needs to be done before this function is called.
2954 if (type != I40E_VSI_FDIR) {
2955 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2957 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2961 vsi->base_queue = ret;
2963 vsi->base_queue = I40E_FDIR_QUEUE_ID;
2965 /* VF has MSIX interrupt in VF range, don't allocate here */
2966 if (type != I40E_VSI_SRIOV) {
2967 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2969 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2970 goto fail_queue_alloc;
2972 vsi->msix_intr = ret;
2976 if (type == I40E_VSI_MAIN) {
2977 /* For main VSI, no need to add since it's default one */
2978 vsi->uplink_seid = pf->mac_seid;
2979 vsi->seid = pf->main_vsi_seid;
2980 /* Bind queues with specific MSIX interrupt */
2982 * Needs at least 2 interrupts: one for the misc cause, which will
2983 * be enabled from the OS side, and another for the queues binding
2984 * the interrupt from the device side only.
2987 /* Get default VSI parameters from hardware */
2988 memset(&ctxt, 0, sizeof(ctxt));
2989 ctxt.seid = vsi->seid;
2990 ctxt.pf_num = hw->pf_id;
2991 ctxt.uplink_seid = vsi->uplink_seid;
2993 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2994 if (ret != I40E_SUCCESS) {
2995 PMD_DRV_LOG(ERR, "Failed to get VSI params");
2996 goto fail_msix_alloc;
2998 (void)rte_memcpy(&vsi->info, &ctxt.info,
2999 sizeof(struct i40e_aqc_vsi_properties_data));
3000 vsi->vsi_id = ctxt.vsi_number;
3001 vsi->info.valid_sections = 0;
3003 /* Configure TC, enable TC0 only */
3004 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3006 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3007 goto fail_msix_alloc;
3010 /* TC, queue mapping */
3011 memset(&ctxt, 0, sizeof(ctxt));
3012 vsi->info.valid_sections |=
3013 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3014 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3015 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3016 (void)rte_memcpy(&ctxt.info, &vsi->info,
3017 sizeof(struct i40e_aqc_vsi_properties_data));
3018 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3019 I40E_DEFAULT_TCMAP);
3020 if (ret != I40E_SUCCESS) {
3021 PMD_DRV_LOG(ERR, "Failed to configure "
3022 "TC queue mapping");
3023 goto fail_msix_alloc;
3025 ctxt.seid = vsi->seid;
3026 ctxt.pf_num = hw->pf_id;
3027 ctxt.uplink_seid = vsi->uplink_seid;
3030 /* Update VSI parameters */
3031 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3032 if (ret != I40E_SUCCESS) {
3033 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3034 goto fail_msix_alloc;
3037 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3038 sizeof(vsi->info.tc_mapping));
3039 (void)rte_memcpy(&vsi->info.queue_mapping,
3040 &ctxt.info.queue_mapping,
3041 sizeof(vsi->info.queue_mapping));
3042 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3043 vsi->info.valid_sections = 0;
3045 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3049 * Updating the default filter settings is necessary to prevent
3050 * reception of tagged packets.
3051 * Some old firmware configurations load a default macvlan
3052 * filter which accepts both tagged and untagged packets.
3053 * The updating is to use a normal filter instead if needed.
3054 * For NVM 4.2.2 or after, the updating is not needed anymore.
3055 * Firmware with correct configurations loads the default
3056 * macvlan filter which is expected and cannot be removed.
3058 i40e_update_default_filter_setting(vsi);
3059 } else if (type == I40E_VSI_SRIOV) {
3060 memset(&ctxt, 0, sizeof(ctxt));
3062 * For other VSI, the uplink_seid equals to uplink VSI's
3063 * uplink_seid since they share same VEB
3065 vsi->uplink_seid = uplink_vsi->uplink_seid;
3066 ctxt.pf_num = hw->pf_id;
3067 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
3068 ctxt.uplink_seid = vsi->uplink_seid;
3069 ctxt.connection_type = 0x1;
3070 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
3073 * Do not configure switch ID to enable VEB switch by
3074 * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. Because in Fortville,
3075 * if the source mac address of a packet sent from a VF is not
3076 * listed in the VEB's mac table, the VEB will switch the
3077 * packet back to the VF. Enable it once the HW issue is fixed.
3081 /* Configure port/vlan */
3082 ctxt.info.valid_sections |=
3083 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3084 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3085 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3086 I40E_DEFAULT_TCMAP);
3087 if (ret != I40E_SUCCESS) {
3088 PMD_DRV_LOG(ERR, "Failed to configure "
3089 "TC queue mapping");
3090 goto fail_msix_alloc;
3092 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3093 ctxt.info.valid_sections |=
3094 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3096 * Since VSI is not created yet, only configure parameter,
3097 * will add vsi below.
3099 } else if (type == I40E_VSI_VMDQ2) {
3100 memset(&ctxt, 0, sizeof(ctxt));
3102 * For other VSI, the uplink_seid equals to uplink VSI's
3103 * uplink_seid since they share same VEB
3105 vsi->uplink_seid = uplink_vsi->uplink_seid;
3106 ctxt.pf_num = hw->pf_id;
3108 ctxt.uplink_seid = vsi->uplink_seid;
3109 ctxt.connection_type = 0x1;
3110 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
3112 ctxt.info.valid_sections |=
3113 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
3114 /* user_param carries flag to enable loop back */
3116 ctxt.info.switch_id =
3117 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
3118 ctxt.info.switch_id |=
3119 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
3122 /* Configure port/vlan */
3123 ctxt.info.valid_sections |=
3124 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3125 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3126 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3127 I40E_DEFAULT_TCMAP);
3128 if (ret != I40E_SUCCESS) {
3129 PMD_DRV_LOG(ERR, "Failed to configure "
3130 "TC queue mapping");
3131 goto fail_msix_alloc;
3133 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3134 ctxt.info.valid_sections |=
3135 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3136 } else if (type == I40E_VSI_FDIR) {
3137 memset(&ctxt, 0, sizeof(ctxt));
3138 vsi->uplink_seid = uplink_vsi->uplink_seid;
3139 ctxt.pf_num = hw->pf_id;
3141 ctxt.uplink_seid = vsi->uplink_seid;
3142 ctxt.connection_type = 0x1; /* regular data port */
3143 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
3144 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3145 I40E_DEFAULT_TCMAP);
3146 if (ret != I40E_SUCCESS) {
3147 PMD_DRV_LOG(ERR, "Failed to configure "
3148 "TC queue mapping.");
3149 goto fail_msix_alloc;
3151 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3152 ctxt.info.valid_sections |=
3153 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3155 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
3156 goto fail_msix_alloc;
3159 if (vsi->type != I40E_VSI_MAIN) {
3160 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
3161 if (ret != I40E_SUCCESS) {
3162 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
3163 hw->aq.asq_last_status);
3164 goto fail_msix_alloc;
3166 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3167 vsi->info.valid_sections = 0;
3168 vsi->seid = ctxt.seid;
3169 vsi->vsi_id = ctxt.vsi_number;
3170 vsi->sib_vsi_list.vsi = vsi;
3171 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3172 &vsi->sib_vsi_list, list);
3175 /* MAC/VLAN configuration */
3176 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3177 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3179 ret = i40e_vsi_add_mac(vsi, &filter);
3180 if (ret != I40E_SUCCESS) {
3181 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3182 goto fail_msix_alloc;
3185 /* Get VSI BW information */
3186 i40e_vsi_dump_bw_config(vsi);
3189 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
3191 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
3197 /* Configure vlan stripping on or off */
3199 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3201 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3202 struct i40e_vsi_context ctxt;
3204 int ret = I40E_SUCCESS;
3206 /* Check if it has been already on or off */
3207 if (vsi->info.valid_sections &
3208 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3210 if ((vsi->info.port_vlan_flags &
3211 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3212 return 0; /* already on */
3214 if ((vsi->info.port_vlan_flags &
3215 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3216 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3217 return 0; /* already off */
3222 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3224 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3225 vsi->info.valid_sections =
3226 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3227 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3228 vsi->info.port_vlan_flags |= vlan_flags;
3229 ctxt.seid = vsi->seid;
3230 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3231 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3233 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3234 on ? "enable" : "disable");
3240 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3242 struct rte_eth_dev_data *data = dev->data;
3245 /* Apply vlan offload setting */
3246 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3248 /* Apply double-vlan setting, not implemented yet */
3250 /* Apply pvid setting */
3251 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3252 data->dev_conf.txmode.hw_vlan_insert_pvid);
3254 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3260 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3262 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3264 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3268 i40e_update_flow_control(struct i40e_hw *hw)
3270 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3271 struct i40e_link_status link_status;
3272 uint32_t rxfc = 0, txfc = 0, reg;
3276 memset(&link_status, 0, sizeof(link_status));
3277 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3278 if (ret != I40E_SUCCESS) {
3279 PMD_DRV_LOG(ERR, "Failed to get link status information");
3280 goto write_reg; /* Disable flow control */
3283 an_info = hw->phy.link_info.an_info;
3284 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3285 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3286 ret = I40E_ERR_NOT_READY;
3287 goto write_reg; /* Disable flow control */
3290 * If link auto negotiation is enabled, flow control needs to
3291 * be configured according to it
3293 switch (an_info & I40E_LINK_PAUSE_RXTX) {
3294 case I40E_LINK_PAUSE_RXTX:
3297 hw->fc.current_mode = I40E_FC_FULL;
3299 case I40E_AQ_LINK_PAUSE_RX:
3301 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3303 case I40E_AQ_LINK_PAUSE_TX:
3305 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3308 hw->fc.current_mode = I40E_FC_NONE;
3313 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3314 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3315 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3316 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3317 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3318 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3325 i40e_pf_setup(struct i40e_pf *pf)
3327 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3328 struct i40e_filter_control_settings settings;
3329 struct i40e_vsi *vsi;
3332 /* Clear all stats counters */
3333 pf->offset_loaded = FALSE;
3334 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3335 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3337 ret = i40e_pf_get_switch_config(pf);
3338 if (ret != I40E_SUCCESS) {
3339 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3342 if (pf->flags & I40E_FLAG_FDIR) {
3343 /* allocate the queue first, let FDIR use queue pair 0 */
3344 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
3345 if (ret != I40E_FDIR_QUEUE_ID) {
3346 PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
3348 pf->flags &= ~I40E_FLAG_FDIR;
3351 /* main VSI setup */
3352 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3354 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3355 return I40E_ERR_NOT_READY;
3359 /* Configure filter control */
3360 memset(&settings, 0, sizeof(settings));
3361 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3362 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3363 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3364 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3366 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
3367 hw->func_caps.rss_table_size);
3368 return I40E_ERR_PARAM;
3370 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3371 "size: %u\n", hw->func_caps.rss_table_size);
3372 pf->hash_lut_size = hw->func_caps.rss_table_size;
3374 /* Enable ethtype and macvlan filters */
3375 settings.enable_ethtype = TRUE;
3376 settings.enable_macvlan = TRUE;
3377 ret = i40e_set_filter_control(hw, &settings);
3379 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3382 /* Update flow control according to the auto negotiation */
3383 i40e_update_flow_control(hw);
3385 return I40E_SUCCESS;
3389 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3395 * Set or clear TX Queue Disable flags,
3396 * which is required by hardware.
3398 i40e_pre_tx_queue_cfg(hw, q_idx, on);
3399 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3401 /* Wait until the request is finished */
3402 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3403 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3404 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3405 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3406 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3412 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3413 return I40E_SUCCESS; /* already on, skip next steps */
3415 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3416 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3418 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3419 return I40E_SUCCESS; /* already off, skip next steps */
3420 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3422 /* Write the register */
3423 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3424 /* Check the result */
3425 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3426 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3427 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3429 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3430 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3433 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3434 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3438 /* Check if it is timeout */
3439 if (j >= I40E_CHK_Q_ENA_COUNT) {
3440 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3441 (on ? "enable" : "disable"), q_idx);
3442 return I40E_ERR_TIMEOUT;
3445 return I40E_SUCCESS;
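/*
 * Note on the sequence above (descriptive only): QENA_REQ is the request bit
 * written by software and QENA_STAT is the status bit reported by hardware.
 * The first poll waits until a previous request has been consumed (REQ and
 * STAT agree), the register is then updated, and the second poll waits until
 * STAT follows the new REQ value or the retry count is exhausted.
 * i40e_switch_rx_queue() below applies the same handshake to QRX_ENA.
 */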
3448 /* Switch on or off the tx queues */
3450 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3452 struct rte_eth_dev_data *dev_data = pf->dev_data;
3453 struct i40e_tx_queue *txq;
3454 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3458 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3459 txq = dev_data->tx_queues[i];
3460 /* Don't operate the queue if not configured or
3461 * if starting only per queue */
3462 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3465 ret = i40e_dev_tx_queue_start(dev, i);
3467 ret = i40e_dev_tx_queue_stop(dev, i);
3468 if (ret != I40E_SUCCESS)
3472 return I40E_SUCCESS;
3476 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3481 /* Wait until the request is finished */
3482 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3483 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3484 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3485 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3486 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3491 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3492 return I40E_SUCCESS; /* Already on, skip next steps */
3493 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3495 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3496 return I40E_SUCCESS; /* Already off, skip next steps */
3497 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3500 /* Write the register */
3501 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3502 /* Check the result */
3503 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3504 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3505 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3507 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3508 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3511 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3512 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3517 /* Check if it is timeout */
3518 if (j >= I40E_CHK_Q_ENA_COUNT) {
3519 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3520 (on ? "enable" : "disable"), q_idx);
3521 return I40E_ERR_TIMEOUT;
3524 return I40E_SUCCESS;
3526 /* Switch on or off the rx queues */
3528 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3530 struct rte_eth_dev_data *dev_data = pf->dev_data;
3531 struct i40e_rx_queue *rxq;
3532 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3536 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3537 rxq = dev_data->rx_queues[i];
3538 /* Don't operate the queue if not configured or
3539 * if starting only per queue */
3540 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3543 ret = i40e_dev_rx_queue_start(dev, i);
3545 ret = i40e_dev_rx_queue_stop(dev, i);
3546 if (ret != I40E_SUCCESS)
3550 return I40E_SUCCESS;
3553 /* Switch on or off all the rx/tx queues */
3555 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3560 /* enable rx queues before enabling tx queues */
3561 ret = i40e_dev_switch_rx_queues(pf, on);
3563 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3566 ret = i40e_dev_switch_tx_queues(pf, on);
3568 /* Stop tx queues before stopping rx queues */
3569 ret = i40e_dev_switch_tx_queues(pf, on);
3571 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3574 ret = i40e_dev_switch_rx_queues(pf, on);
3580 /* Initialize VSI for TX */
3582 i40e_dev_tx_init(struct i40e_pf *pf)
3584 struct rte_eth_dev_data *data = pf->dev_data;
3586 uint32_t ret = I40E_SUCCESS;
3587 struct i40e_tx_queue *txq;
3589 for (i = 0; i < data->nb_tx_queues; i++) {
3590 txq = data->tx_queues[i];
3591 if (!txq || !txq->q_set)
3593 ret = i40e_tx_queue_init(txq);
3594 if (ret != I40E_SUCCESS)
3601 /* Initialize VSI for RX */
3603 i40e_dev_rx_init(struct i40e_pf *pf)
3605 struct rte_eth_dev_data *data = pf->dev_data;
3606 int ret = I40E_SUCCESS;
3608 struct i40e_rx_queue *rxq;
3610 i40e_pf_config_mq_rx(pf);
3611 for (i = 0; i < data->nb_rx_queues; i++) {
3612 rxq = data->rx_queues[i];
3613 if (!rxq || !rxq->q_set)
3616 ret = i40e_rx_queue_init(rxq);
3617 if (ret != I40E_SUCCESS) {
3618 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3628 i40e_dev_rxtx_init(struct i40e_pf *pf)
3632 err = i40e_dev_tx_init(pf);
3634 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3637 err = i40e_dev_rx_init(pf);
3639 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3647 i40e_vmdq_setup(struct rte_eth_dev *dev)
3649 struct rte_eth_conf *conf = &dev->data->dev_conf;
3650 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3651 int i, err, conf_vsis, j, loop;
3652 struct i40e_vsi *vsi;
3653 struct i40e_vmdq_info *vmdq_info;
3654 struct rte_eth_vmdq_rx_conf *vmdq_conf;
3655 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3658 * Disable interrupts to avoid messages from VFs. Furthermore, it
3659 * avoids race conditions in VSI creation/destroy.
3661 i40e_pf_disable_irq0(hw);
3663 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3664 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3668 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3669 if (conf_vsis > pf->max_nb_vmdq_vsi) {
3670 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3671 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3672 pf->max_nb_vmdq_vsi);
3676 if (pf->vmdq != NULL) {
3677 PMD_INIT_LOG(INFO, "VMDQ already configured");
3681 pf->vmdq = rte_zmalloc("vmdq_info_struct",
3682 sizeof(*vmdq_info) * conf_vsis, 0);
3684 if (pf->vmdq == NULL) {
3685 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3689 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3691 /* Create VMDQ VSI */
3692 for (i = 0; i < conf_vsis; i++) {
3693 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3694 vmdq_conf->enable_loop_back);
3696 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3700 vmdq_info = &pf->vmdq[i];
3702 vmdq_info->vsi = vsi;
3704 pf->nb_cfg_vmdq_vsi = conf_vsis;
3706 /* Configure Vlan */
3707 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3708 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3709 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3710 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3711 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3712 vmdq_conf->pool_map[i].vlan_id, j);
3714 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3715 vmdq_conf->pool_map[i].vlan_id);
3717 PMD_INIT_LOG(ERR, "Failed to add vlan");
3725 i40e_pf_enable_irq0(hw);
3730 for (i = 0; i < conf_vsis; i++)
3731 if (pf->vmdq[i].vsi == NULL)
3734 i40e_vsi_release(pf->vmdq[i].vsi);
3738 i40e_pf_enable_irq0(hw);
3743 i40e_stat_update_32(struct i40e_hw *hw,
3751 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3755 if (new_data >= *offset)
3756 *stat = (uint64_t)(new_data - *offset);
3758 *stat = (uint64_t)((new_data +
3759 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
3763 i40e_stat_update_48(struct i40e_hw *hw,
3772 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3773 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3774 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
3779 if (new_data >= *offset)
3780 *stat = new_data - *offset;
3782 *stat = (uint64_t)((new_data +
3783 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
3785 *stat &= I40E_48_BIT_MASK;
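/*
 * Worked example for the rollover handling above: if the 48-bit counter
 * wrapped from 0xFFFFFFFFFFFE to 0x4 between reads, new_data (4) is smaller
 * than *offset, so 2^48 is added before subtracting, giving the expected
 * delta of 6; the final mask keeps the result within 48 bits.
 * i40e_stat_update_32() applies the same idea with a 2^32 modulus.
 */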
3790 i40e_pf_disable_irq0(struct i40e_hw *hw)
3792 /* Disable all interrupt types */
3793 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3794 I40E_WRITE_FLUSH(hw);
3799 i40e_pf_enable_irq0(struct i40e_hw *hw)
3801 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3802 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3803 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3804 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3805 I40E_WRITE_FLUSH(hw);
3809 i40e_pf_config_irq0(struct i40e_hw *hw)
3811 /* read pending request and disable first */
3812 i40e_pf_disable_irq0(hw);
3813 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3814 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3815 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3817 /* Link no queues with irq0 */
3818 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3819 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3823 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3825 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3826 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3829 uint32_t index, offset, val;
3834 * Try to find which VF triggered a reset, using the absolute VF id to
3835 * access the register since it is a global register.
3837 for (i = 0; i < pf->vf_num; i++) {
3838 abs_vf_id = hw->func_caps.vf_base_id + i;
3839 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3840 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3841 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3842 /* VFR event occurred */
3843 if (val & (0x1 << offset)) {
3846 /* Clear the event first */
3847 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3849 PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3851 * Only notify a VF reset event occured,
3852 * don't trigger another SW reset
3854 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3855 if (ret != I40E_SUCCESS)
3856 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3862 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3864 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3865 struct i40e_arq_event_info info;
3866 uint16_t pending, opcode;
3869 info.buf_len = I40E_AQ_BUF_SZ;
3870 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3871 if (!info.msg_buf) {
3872 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3878 ret = i40e_clean_arq_element(hw, &info, &pending);
3880 if (ret != I40E_SUCCESS) {
3881 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3882 "aq_err: %u", hw->aq.asq_last_status);
3885 opcode = rte_le_to_cpu_16(info.desc.opcode);
3888 case i40e_aqc_opc_send_msg_to_pf:
3889 /* Refer to i40e_aq_send_msg_to_pf() for argument layout*/
3890 i40e_pf_host_handle_vf_msg(dev,
3891 rte_le_to_cpu_16(info.desc.retval),
3892 rte_le_to_cpu_32(info.desc.cookie_high),
3893 rte_le_to_cpu_32(info.desc.cookie_low),
3898 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3903 rte_free(info.msg_buf);
3907 * Interrupt handler is registered as the alarm callback for handling LSC
3908 * interrupt after a fixed delay, in order to wait for the NIC to reach a
3909 * stable state. Currently it waits 1 sec in i40e for the link up interrupt,
3910 * and no wait is needed for the link down interrupt.
3913 i40e_dev_interrupt_delayed_handler(void *param)
3915 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3916 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3919 /* read interrupt causes again */
3920 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3922 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3923 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3924 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
3925 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3926 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
3927 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3928 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
3929 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3930 PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
3931 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3932 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3934 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3935 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
3936 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3937 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
3938 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3940 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3941 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3942 i40e_dev_handle_vfr_event(dev);
3944 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3945 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3946 i40e_dev_handle_aq_msg(dev);
3949 /* handle the link up interrupt in an alarm callback */
3950 i40e_dev_link_update(dev, 0);
3951 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3953 i40e_pf_enable_irq0(hw);
3954 rte_intr_enable(&(dev->pci_dev->intr_handle));
3958 * Interrupt handler triggered by NIC for handling
3959 * specific interrupt.
3962 * Pointer to interrupt handle.
3964 * The address of parameter (struct rte_eth_dev *) registered before.
3970 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3973 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3974 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3977 /* Disable interrupt */
3978 i40e_pf_disable_irq0(hw);
3980 /* read out interrupt causes */
3981 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3983 /* No interrupt event indicated */
3984 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3985 PMD_DRV_LOG(INFO, "No interrupt event");
3988 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3989 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3990 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3991 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3992 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3993 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3994 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3995 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3996 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3997 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3998 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
3999 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4000 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4001 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4002 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4003 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4005 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4006 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4007 i40e_dev_handle_vfr_event(dev);
4009 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4010 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4011 i40e_dev_handle_aq_msg(dev);
4014 /* Link Status Change interrupt */
4015 if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4016 #define I40E_US_PER_SECOND 1000000
4017 struct rte_eth_link link;
4019 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
4020 memset(&link, 0, sizeof(link));
4021 rte_i40e_dev_atomic_read_link_status(dev, &link);
4022 i40e_dev_link_update(dev, 0);
4025 * For the link up interrupt, it needs to wait 1 second to let the
4026 * hardware reach a stable state. Otherwise several consecutive
4027 * interrupts can be observed.
4028 * For link down interrupt, no need to wait.
4030 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4031 i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4034 _rte_eth_dev_callback_process(dev,
4035 RTE_ETH_EVENT_INTR_LSC);
4039 /* Enable interrupt */
4040 i40e_pf_enable_irq0(hw);
4041 rte_intr_enable(&(dev->pci_dev->intr_handle));
4045 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4046 struct i40e_macvlan_filter *filter,
4049 int ele_num, ele_buff_size;
4050 int num, actual_num, i;
4052 int ret = I40E_SUCCESS;
4053 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4054 struct i40e_aqc_add_macvlan_element_data *req_list;
4056 if (filter == NULL || total == 0)
4057 return I40E_ERR_PARAM;
4058 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4059 ele_buff_size = hw->aq.asq_buf_size;
4061 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
4062 if (req_list == NULL) {
4063 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4064 return I40E_ERR_NO_MEMORY;
4069 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4070 memset(req_list, 0, ele_buff_size);
4072 for (i = 0; i < actual_num; i++) {
4073 (void)rte_memcpy(req_list[i].mac_addr,
4074 &filter[num + i].macaddr, ETH_ADDR_LEN);
4075 req_list[i].vlan_tag =
4076 rte_cpu_to_le_16(filter[num + i].vlan_id);
4078 switch (filter[num + i].filter_type) {
4079 case RTE_MAC_PERFECT_MATCH:
4080 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
4081 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4083 case RTE_MACVLAN_PERFECT_MATCH:
4084 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
4086 case RTE_MAC_HASH_MATCH:
4087 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
4088 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4090 case RTE_MACVLAN_HASH_MATCH:
4091 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
4094 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
4095 ret = I40E_ERR_PARAM;
4099 req_list[i].queue_number = 0;
4101 req_list[i].flags = rte_cpu_to_le_16(flags);
4104 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
4106 if (ret != I40E_SUCCESS) {
4107 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
4111 } while (num < total);
4119 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
4120 struct i40e_macvlan_filter *filter,
4123 int ele_num, ele_buff_size;
4124 int num, actual_num, i;
4126 int ret = I40E_SUCCESS;
4127 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4128 struct i40e_aqc_remove_macvlan_element_data *req_list;
4130 if (filter == NULL || total == 0)
4131 return I40E_ERR_PARAM;
4133 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4134 ele_buff_size = hw->aq.asq_buf_size;
4136 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
4137 if (req_list == NULL) {
4138 PMD_DRV_LOG(ERR, "Fail to allocate memory");
4139 return I40E_ERR_NO_MEMORY;
4144 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4145 memset(req_list, 0, ele_buff_size);
4147 for (i = 0; i < actual_num; i++) {
4148 (void)rte_memcpy(req_list[i].mac_addr,
4149 &filter[num + i].macaddr, ETH_ADDR_LEN);
4150 req_list[i].vlan_tag =
4151 rte_cpu_to_le_16(filter[num + i].vlan_id);
4153 switch (filter[num + i].filter_type) {
4154 case RTE_MAC_PERFECT_MATCH:
4155 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
4156 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4158 case RTE_MACVLAN_PERFECT_MATCH:
4159 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
4161 case RTE_MAC_HASH_MATCH:
4162 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
4163 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
4165 case RTE_MACVLAN_HASH_MATCH:
4166 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
4169 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
4170 ret = I40E_ERR_PARAM;
4173 req_list[i].flags = rte_cpu_to_le_16(flags);
4176 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
4178 if (ret != I40E_SUCCESS) {
4179 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
4183 } while (num < total);
4190 /* Find a specific MAC filter */
4191 static struct i40e_mac_filter *
4192 i40e_find_mac_filter(struct i40e_vsi *vsi,
4193 struct ether_addr *macaddr)
4195 struct i40e_mac_filter *f;
4197 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4198 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4206 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4209 uint32_t vid_idx, vid_bit;
4211 if (vlan_id > ETH_VLAN_ID_MAX)
4214 vid_idx = I40E_VFTA_IDX(vlan_id);
4215 vid_bit = I40E_VFTA_BIT(vlan_id);
4217 if (vsi->vfta[vid_idx] & vid_bit)
4224 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4225 uint16_t vlan_id, bool on)
4227 uint32_t vid_idx, vid_bit;
4229 if (vlan_id > ETH_VLAN_ID_MAX)
4232 vid_idx = I40E_VFTA_IDX(vlan_id);
4233 vid_bit = I40E_VFTA_BIT(vlan_id);
4236 vsi->vfta[vid_idx] |= vid_bit;
4238 vsi->vfta[vid_idx] &= ~vid_bit;
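/*
 * Illustrative sketch, not part of this driver: the VLAN filter table
 * (vfta) is a bitmap of 4096 bits kept in 32-bit words. The
 * I40E_VFTA_IDX()/I40E_VFTA_BIT() macros used above conceptually reduce
 * to the word/bit split shown here.
 */
#if 0
static int
example_vfta_test(const uint32_t *vfta, uint16_t vlan_id)
{
	uint32_t word = vlan_id >> 5;		/* vlan_id / 32 */
	uint32_t bit = 1UL << (vlan_id & 0x1F);	/* vlan_id % 32 */

	return (vfta[word] & bit) != 0;
}
#endif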
4242 * Find all VLAN options for a specific MAC address; the VLANs found
4243 * are filled into the given macvlan filter array.
4246 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4247 struct i40e_macvlan_filter *mv_f,
4248 int num, struct ether_addr *addr)
4254 * i40e_find_vlan_filter is deliberately not used here to reduce the
4255 * loop time, although the code looks more complex.
4257 if (num < vsi->vlan_num)
4258 return I40E_ERR_PARAM;
4261 for (j = 0; j < I40E_VFTA_SIZE; j++) {
4263 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4264 if (vsi->vfta[j] & (1 << k)) {
4266 PMD_DRV_LOG(ERR, "vlan number "
4268 return I40E_ERR_PARAM;
4270 (void)rte_memcpy(&mv_f[i].macaddr,
4271 addr, ETH_ADDR_LEN);
4273 j * I40E_UINT32_BIT_SIZE + k;
4279 return I40E_SUCCESS;
4283 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4284 struct i40e_macvlan_filter *mv_f,
4289 struct i40e_mac_filter *f;
4291 if (num < vsi->mac_num)
4292 return I40E_ERR_PARAM;
4294 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4296 PMD_DRV_LOG(ERR, "buffer number does not match");
4297 return I40E_ERR_PARAM;
4299 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4301 mv_f[i].vlan_id = vlan;
4302 mv_f[i].filter_type = f->mac_info.filter_type;
4306 return I40E_SUCCESS;
4310 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
4313 struct i40e_mac_filter *f;
4314 struct i40e_macvlan_filter *mv_f;
4315 int ret = I40E_SUCCESS;
4317 if (vsi == NULL || vsi->mac_num == 0)
4318 return I40E_ERR_PARAM;
4320 /* Case where no VLAN is set */
4321 if (vsi->vlan_num == 0)
4324 num = vsi->mac_num * vsi->vlan_num;
4326 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
4328 PMD_DRV_LOG(ERR, "failed to allocate memory");
4329 return I40E_ERR_NO_MEMORY;
4333 if (vsi->vlan_num == 0) {
4334 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4335 (void)rte_memcpy(&mv_f[i].macaddr,
4336 &f->mac_info.mac_addr, ETH_ADDR_LEN);
4337 mv_f[i].vlan_id = 0;
4341 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4342 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
4343 vsi->vlan_num, &f->mac_info.mac_addr);
4344 if (ret != I40E_SUCCESS)
4350 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
4358 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4360 struct i40e_macvlan_filter *mv_f;
4362 int ret = I40E_SUCCESS;
4364 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4365 return I40E_ERR_PARAM;
4367 /* If it's already set, just return */
4368 if (i40e_find_vlan_filter(vsi, vlan))
4369 return I40E_SUCCESS;
4371 mac_num = vsi->mac_num;
4374 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4375 return I40E_ERR_PARAM;
4378 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4381 PMD_DRV_LOG(ERR, "failed to allocate memory");
4382 return I40E_ERR_NO_MEMORY;
4385 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4387 if (ret != I40E_SUCCESS)
4390 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4392 if (ret != I40E_SUCCESS)
4395 i40e_set_vlan_filter(vsi, vlan, 1);
4405 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4407 struct i40e_macvlan_filter *mv_f;
4409 int ret = I40E_SUCCESS;
4412 * Vlan 0 is the generic filter for untagged packets
4413 * and can't be removed.
4415 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
4416 return I40E_ERR_PARAM;
4418 /* If it can't be found, just return */
4419 if (!i40e_find_vlan_filter(vsi, vlan))
4420 return I40E_ERR_PARAM;
4422 mac_num = vsi->mac_num;
4425 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4426 return I40E_ERR_PARAM;
4429 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4432 PMD_DRV_LOG(ERR, "failed to allocate memory");
4433 return I40E_ERR_NO_MEMORY;
4436 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4438 if (ret != I40E_SUCCESS)
4441 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4443 if (ret != I40E_SUCCESS)
4446 /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
4447 if (vsi->vlan_num == 1) {
4448 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4449 if (ret != I40E_SUCCESS)
4452 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4453 if (ret != I40E_SUCCESS)
4457 i40e_set_vlan_filter(vsi, vlan, 0);
4467 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4469 struct i40e_mac_filter *f;
4470 struct i40e_macvlan_filter *mv_f;
4471 int i, vlan_num = 0;
4472 int ret = I40E_SUCCESS;
4474 /* If it's an add and the filter is already configured, just return */
4475 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4477 return I40E_SUCCESS;
4478 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4479 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4482 * If vlan_num is 0, this is the first time a MAC is added;
4483 * set the mask for vlan_id 0.
4485 if (vsi->vlan_num == 0) {
4486 i40e_set_vlan_filter(vsi, 0, 1);
4489 vlan_num = vsi->vlan_num;
4490 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4491 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4494 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4496 PMD_DRV_LOG(ERR, "failed to allocate memory");
4497 return I40E_ERR_NO_MEMORY;
4500 for (i = 0; i < vlan_num; i++) {
4501 mv_f[i].filter_type = mac_filter->filter_type;
4502 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4506 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4507 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4508 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4509 &mac_filter->mac_addr);
4510 if (ret != I40E_SUCCESS)
4514 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4515 if (ret != I40E_SUCCESS)
4518 /* Add the MAC address to the MAC list */
4519 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4521 PMD_DRV_LOG(ERR, "failed to allocate memory");
4522 ret = I40E_ERR_NO_MEMORY;
4525 (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4527 f->mac_info.filter_type = mac_filter->filter_type;
4528 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4539 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4541 struct i40e_mac_filter *f;
4542 struct i40e_macvlan_filter *mv_f;
4544 enum rte_mac_filter_type filter_type;
4545 int ret = I40E_SUCCESS;
4547 /* Can't find it, return an error */
4548 f = i40e_find_mac_filter(vsi, addr);
4550 return I40E_ERR_PARAM;
4552 vlan_num = vsi->vlan_num;
4553 filter_type = f->mac_info.filter_type;
4554 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4555 filter_type == RTE_MACVLAN_HASH_MATCH) {
4556 if (vlan_num == 0) {
4557 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
4558 return I40E_ERR_PARAM;
4560 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4561 filter_type == RTE_MAC_HASH_MATCH)
4564 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4566 PMD_DRV_LOG(ERR, "failed to allocate memory");
4567 return I40E_ERR_NO_MEMORY;
4570 for (i = 0; i < vlan_num; i++) {
4571 mv_f[i].filter_type = filter_type;
4572 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4575 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4576 filter_type == RTE_MACVLAN_HASH_MATCH) {
4577 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4578 if (ret != I40E_SUCCESS)
4582 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4583 if (ret != I40E_SUCCESS)
4586 /* Remove the MAC address from the MAC list */
4587 TAILQ_REMOVE(&vsi->mac_list, f, next);
4597 /* Configure hash enable flags for RSS */
4599 i40e_config_hena(uint64_t flags)
4606 if (flags & ETH_RSS_FRAG_IPV4)
4607 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4608 if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
4609 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4610 if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
4611 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4612 if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
4613 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4614 if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
4615 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4616 if (flags & ETH_RSS_FRAG_IPV6)
4617 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4618 if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
4619 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4620 if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
4621 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4622 if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
4623 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4624 if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
4625 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4626 if (flags & ETH_RSS_L2_PAYLOAD)
4627 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4632 /* Parse the hash enable flags */
4634 i40e_parse_hena(uint64_t flags)
4636 uint64_t rss_hf = 0;
4640 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4641 rss_hf |= ETH_RSS_FRAG_IPV4;
4642 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4643 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
4644 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4645 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
4646 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4647 rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
4648 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4649 rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
4650 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4651 rss_hf |= ETH_RSS_FRAG_IPV6;
4652 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4653 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
4654 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4655 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
4656 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4657 rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
4658 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4659 rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
4660 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4661 rss_hf |= ETH_RSS_L2_PAYLOAD;
4668 i40e_pf_disable_rss(struct i40e_pf *pf)
4670 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4673 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4674 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4675 hena &= ~I40E_RSS_HENA_ALL;
4676 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4677 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4678 I40E_WRITE_FLUSH(hw);
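/*
 * Illustrative sketch, not part of this driver: the hash enable mask is a
 * 64-bit value stored in two 32-bit registers, PFQF_HENA(0) for the low
 * half and PFQF_HENA(1) for the high half, hence the shift-by-32
 * read/write pattern used above and below.
 */
#if 0
static uint64_t
example_hena_combine(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

static void
example_hena_split(uint64_t hena, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)hena;
	*hi = (uint32_t)(hena >> 32);
}
#endif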
4682 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4685 uint8_t hash_key_len;
4690 hash_key = (uint32_t *)(rss_conf->rss_key);
4691 hash_key_len = rss_conf->rss_key_len;
4692 if (hash_key != NULL && hash_key_len >=
4693 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4694 /* Fill in RSS hash key */
4695 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4696 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4699 rss_hf = rss_conf->rss_hf;
4700 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4701 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4702 hena &= ~I40E_RSS_HENA_ALL;
4703 hena |= i40e_config_hena(rss_hf);
4704 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4705 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4706 I40E_WRITE_FLUSH(hw);
4712 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4713 struct rte_eth_rss_conf *rss_conf)
4715 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4716 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4719 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4720 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4721 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4722 if (rss_hf != 0) /* Enable RSS */
4724 return 0; /* Nothing to do */
4727 if (rss_hf == 0) /* Disable RSS */
4730 return i40e_hw_rss_hash_set(hw, rss_conf);
4734 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4735 struct rte_eth_rss_conf *rss_conf)
4737 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4738 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4742 if (hash_key != NULL) {
4743 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4744 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4745 rss_conf->rss_key_len = i * sizeof(uint32_t);
4747 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4748 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4749 rss_conf->rss_hf = i40e_parse_hena(hena);
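/*
 * Illustrative application-side sketch, not part of this driver: the two
 * callbacks above back the generic ethdev RSS hash API. Port number,
 * buffer size and hash protocols below are arbitrary example values.
 */
#if 0
static void
example_rss_hash_usage(void)
{
	struct rte_eth_rss_conf conf;
	uint8_t key[64]; /* large enough for the 52-byte i40e RSS key */

	memset(&conf, 0, sizeof(conf));
	conf.rss_key = key;
	conf.rss_key_len = sizeof(key);

	/* Read back the current key and enabled hash protocols. */
	rte_eth_dev_rss_hash_conf_get(0, &conf);

	/* Hash on IPv4 TCP/UDP only, keeping the key just read back. */
	conf.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;
	rte_eth_dev_rss_hash_update(0, &conf);
}
#endif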
4755 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
4757 switch (filter_type) {
4758 case RTE_TUNNEL_FILTER_IMAC_IVLAN:
4759 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
4761 case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
4762 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
4764 case RTE_TUNNEL_FILTER_IMAC_TENID:
4765 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
4767 case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
4768 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
4770 case ETH_TUNNEL_FILTER_IMAC:
4771 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
4774 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
4782 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
4783 struct rte_eth_tunnel_filter_conf *tunnel_filter,
4787 uint8_t tun_type = 0;
4789 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4790 struct i40e_vsi *vsi = pf->main_vsi;
4791 struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
4792 struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
4794 cld_filter = rte_zmalloc("tunnel_filter",
4795 sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
4798 if (NULL == cld_filter) {
4799 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4802 pfilter = cld_filter;
4804 (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
4805 sizeof(struct ether_addr));
4806 (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
4807 sizeof(struct ether_addr));
4809 pfilter->inner_vlan = tunnel_filter->inner_vlan;
4810 if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
4811 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
4812 (void)rte_memcpy(&pfilter->ipaddr.v4.data,
4813 &tunnel_filter->ip_addr,
4814 sizeof(pfilter->ipaddr.v4.data));
4816 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
4817 (void)rte_memcpy(&pfilter->ipaddr.v6.data,
4818 &tunnel_filter->ip_addr,
4819 sizeof(pfilter->ipaddr.v6.data));
4822 /* check the tunnel type */
4823 switch (tunnel_filter->tunnel_type) {
4824 case RTE_TUNNEL_TYPE_VXLAN:
4825 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
4827 case RTE_TUNNEL_TYPE_NVGRE:
4828 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
4831 /* Other tunnel types are not supported. */
4832 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
4833 rte_free(cld_filter);
4837 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
4840 rte_free(cld_filter);
4844 pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
4845 (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
4846 pfilter->tenant_id = tunnel_filter->tenant_id;
4847 pfilter->queue_number = tunnel_filter->queue_id;
4850 ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
4852 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4855 rte_free(cld_filter);
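/*
 * Illustrative application-side sketch, not part of this driver: a VXLAN
 * cloud filter that matches on the inner MAC address only and steers hits
 * to a given queue. All numeric values are arbitrary; note that in this
 * API revision outer_mac/inner_mac are pointers and are copied even when
 * not matched on, so both must be valid.
 */
#if 0
static void
example_add_vxlan_tunnel_filter(struct ether_addr *outer_mac,
				struct ether_addr *inner_mac)
{
	struct rte_eth_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.outer_mac = outer_mac;
	conf.inner_mac = inner_mac;
	conf.filter_type = ETH_TUNNEL_FILTER_IMAC;
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	conf.tenant_id = 100;	/* VNI */
	conf.queue_id = 2;

	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_TUNNEL,
				RTE_ETH_FILTER_ADD, &conf);
}
#endif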
4860 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
4864 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
4865 if (pf->vxlan_ports[i] == port)
4873 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
4877 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4879 idx = i40e_get_vxlan_port_idx(pf, port);
4881 /* Check if port already exists */
4883 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
4887 /* Now check if there is space to add the new port */
4888 idx = i40e_get_vxlan_port_idx(pf, 0);
4890 PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
4891 "not adding port %d", port);
4895 ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
4898 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
4902 PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
4905 /* New port: add it and mark its index in the bitmap */
4906 pf->vxlan_ports[idx] = port;
4907 pf->vxlan_bitmap |= (1 << idx);
4909 if (!(pf->flags & I40E_FLAG_VXLAN))
4910 pf->flags |= I40E_FLAG_VXLAN;
4916 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
4919 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4921 if (!(pf->flags & I40E_FLAG_VXLAN)) {
4922 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
4926 idx = i40e_get_vxlan_port_idx(pf, port);
4929 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
4933 if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
4934 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
4938 PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
4941 pf->vxlan_ports[idx] = 0;
4942 pf->vxlan_bitmap &= ~(1 << idx);
4944 if (!pf->vxlan_bitmap)
4945 pf->flags &= ~I40E_FLAG_VXLAN;
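/*
 * Illustrative sketch, not part of this driver: offloaded VXLAN UDP ports
 * are tracked in a small fixed array plus a bitmap. Unused slots hold
 * port 0, which is why i40e_get_vxlan_port_idx(pf, 0) above doubles as a
 * free-slot lookup.
 */
#if 0
static int
example_find_slot(const uint16_t *ports, int n_ports, uint16_t port)
{
	int i;

	for (i = 0; i < n_ports; i++)
		if (ports[i] == port)
			return i;
	return -1;	/* not found; pass port == 0 to find a free slot */
}
#endif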
4950 /* Add UDP tunneling port */
4952 i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
4953 struct rte_eth_udp_tunnel *udp_tunnel)
4956 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4958 if (udp_tunnel == NULL)
4961 switch (udp_tunnel->prot_type) {
4962 case RTE_TUNNEL_TYPE_VXLAN:
4963 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
4966 case RTE_TUNNEL_TYPE_GENEVE:
4967 case RTE_TUNNEL_TYPE_TEREDO:
4968 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
4973 PMD_DRV_LOG(ERR, "Invalid tunnel type");
4981 /* Remove UDP tunneling port */
4983 i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
4984 struct rte_eth_udp_tunnel *udp_tunnel)
4987 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4989 if (udp_tunnel == NULL)
4992 switch (udp_tunnel->prot_type) {
4993 case RTE_TUNNEL_TYPE_VXLAN:
4994 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
4996 case RTE_TUNNEL_TYPE_GENEVE:
4997 case RTE_TUNNEL_TYPE_TEREDO:
4998 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
5002 PMD_DRV_LOG(ERR, "Invalid tunnel type");
5010 /* Calculate the maximum number of contiguous PF queues that are configured */
5012 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
5014 struct rte_eth_dev_data *data = pf->dev_data;
5016 struct i40e_rx_queue *rxq;
5019 for (i = 0; i < pf->lan_nb_qps; i++) {
5020 rxq = data->rx_queues[i];
5021 if (rxq && rxq->q_set)
5032 i40e_pf_config_rss(struct i40e_pf *pf)
5034 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5035 struct rte_eth_rss_conf rss_conf;
5036 uint32_t i, lut = 0;
5040 * If both VMDQ and RSS are enabled, not all PF queues are configured.
5041 * It's necessary to calculate the actual number of configured PF queues.
5043 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
5044 num = i40e_pf_calc_configured_queues_num(pf);
5045 num = i40e_align_floor(num);
5047 num = i40e_align_floor(pf->dev_data->nb_rx_queues);
5049 PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
5053 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
5057 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
5060 lut = (lut << 8) | (j & ((0x1 <<
5061 hw->func_caps.rss_table_entry_width) - 1));
5063 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
5066 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
5067 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
5068 i40e_pf_disable_rss(pf);
5071 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
5072 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
5073 /* Random default keys */
5074 static uint32_t rss_key_default[] = {0x6b793944,
5075 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
5076 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
5077 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
5079 rss_conf.rss_key = (uint8_t *)rss_key_default;
5080 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
5084 return i40e_hw_rss_hash_set(hw, &rss_conf);
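/*
 * Illustrative sketch, not part of this driver: the redirection table is
 * filled with queue indexes 0..num-1 in round-robin order, four entries
 * packed per 32-bit HLUT register, so a register is written once every
 * fourth iteration at index i / 4. The driver masks each entry with the
 * table entry width reported by the firmware; 0xFF is used here only to
 * keep the sketch self-contained.
 */
#if 0
static void
example_fill_lut(uint32_t *lut_regs, uint32_t table_size, uint32_t num)
{
	uint32_t i, j, lut = 0;

	for (i = 0, j = 0; i < table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & 0xFF);
		if ((i & 0x3) == 0x3)
			lut_regs[i >> 2] = lut;
	}
}
#endif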
5088 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
5089 struct rte_eth_tunnel_filter_conf *filter)
5091 if (pf == NULL || filter == NULL) {
5092 PMD_DRV_LOG(ERR, "Invalid parameter");
5096 if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
5097 PMD_DRV_LOG(ERR, "Invalid queue ID");
5101 if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
5102 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
5106 if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
5107 (is_zero_ether_addr(filter->outer_mac))) {
5108 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
5112 if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
5113 (is_zero_ether_addr(filter->inner_mac))) {
5114 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
5122 i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5125 struct rte_eth_tunnel_filter_conf *filter;
5126 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5127 int ret = I40E_SUCCESS;
5129 filter = (struct rte_eth_tunnel_filter_conf *)(arg);
5131 if (i40e_tunnel_filter_param_check(pf, filter) < 0)
5132 return I40E_ERR_PARAM;
5134 switch (filter_op) {
5135 case RTE_ETH_FILTER_NOP:
5136 if (!(pf->flags & I40E_FLAG_VXLAN))
5137 ret = I40E_NOT_SUPPORTED;
5138 case RTE_ETH_FILTER_ADD:
5139 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
5141 case RTE_ETH_FILTER_DELETE:
5142 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
5145 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
5146 ret = I40E_ERR_PARAM;
5154 i40e_pf_config_mq_rx(struct i40e_pf *pf)
5157 enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
5159 if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
5160 PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
5165 if (mq_mode & ETH_MQ_RX_RSS_FLAG)
5166 ret = i40e_pf_config_rss(pf);
5168 i40e_pf_disable_rss(pf);
5173 /* Get the symmetric hash enable configurations per port */
5175 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
5177 uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5179 *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
5182 /* Set the symmetric hash enable configurations per port */
5184 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
5186 uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);
5189 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
5190 PMD_DRV_LOG(INFO, "Symmetric hash has already "
5194 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5196 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
5197 PMD_DRV_LOG(INFO, "Symmetric hash has already "
5201 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
5203 I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
5204 I40E_WRITE_FLUSH(hw);
5208 * Get global configurations of hash function type and symmetric hash enable
5209 * per flow type (pctype). Note that global configuration means it affects all
5210 * the ports on the same NIC.
5213 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
5214 struct rte_eth_hash_global_conf *g_cfg)
5216 uint32_t reg, mask = I40E_FLOW_TYPES;
5218 enum i40e_filter_pctype pctype;
5220 memset(g_cfg, 0, sizeof(*g_cfg));
5221 reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5222 if (reg & I40E_GLQF_CTL_HTOEP_MASK)
5223 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
5225 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
5226 PMD_DRV_LOG(DEBUG, "Hash function is %s",
5227 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
5229 for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
5230 if (!(mask & (1UL << i)))
5232 mask &= ~(1UL << i);
5233 /* A set bit indicates the corresponding flow type is supported */
5234 g_cfg->valid_bit_mask[0] |= (1UL << i);
5235 pctype = i40e_flowtype_to_pctype(i);
5236 reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
5237 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
5238 g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
5245 i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
5248 uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;
5250 if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
5251 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5252 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
5253 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
5259 * As i40e supports fewer than 32 flow types, only the first 32 bits need to
5262 mask0 = g_cfg->valid_bit_mask[0];
5263 for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
5265 /* Check if any unsupported flow type configured */
5266 if ((mask0 | i40e_mask) ^ i40e_mask)
5269 if (g_cfg->valid_bit_mask[i])
5277 PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
5283 * Set global configurations of hash function type and symmetric hash enable
5284 * per flow type (pctype). Note that any change to the global configuration will affect
5285 * all the ports on the same NIC.
5288 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
5289 struct rte_eth_hash_global_conf *g_cfg)
5294 uint32_t mask0 = g_cfg->valid_bit_mask[0];
5295 enum i40e_filter_pctype pctype;
5297 /* Check the input parameters */
5298 ret = i40e_hash_global_config_check(g_cfg);
5302 for (i = 0; mask0 && i < UINT32_BIT; i++) {
5303 if (!(mask0 & (1UL << i)))
5305 mask0 &= ~(1UL << i);
5306 pctype = i40e_flowtype_to_pctype(i);
5307 reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
5308 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
5309 I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
5312 reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
5313 if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
5315 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
5316 PMD_DRV_LOG(DEBUG, "Hash function already set to "
5320 reg |= I40E_GLQF_CTL_HTOEP_MASK;
5321 } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
5323 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
5324 PMD_DRV_LOG(DEBUG, "Hash function already set to "
5328 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
5330 /* Use the default, and keep it as it is */
5333 I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);
5336 I40E_WRITE_FLUSH(hw);
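/*
 * Illustrative application-side sketch, not part of this driver: the
 * global symmetric-hash configuration above is reached through the
 * generic filter_ctrl API. Port 0 and the IPv4/TCP flow type are
 * arbitrary example choices.
 */
#if 0
static void
example_enable_symmetric_hash(void)
{
	struct rte_eth_hash_filter_info info;

	memset(&info, 0, sizeof(info));
	info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
	info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	info.info.global_conf.valid_bit_mask[0] =
		1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	info.info.global_conf.sym_hash_enable_mask[0] =
		1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
				RTE_ETH_FILTER_SET, &info);
}
#endif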
5342 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5347 PMD_DRV_LOG(ERR, "Invalid pointer");
5351 switch (info->info_type) {
5352 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5353 i40e_get_symmetric_hash_enable_per_port(hw,
5354 &(info->info.enable));
5356 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5357 ret = i40e_get_hash_filter_global_config(hw,
5358 &(info->info.global_conf));
5361 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5371 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
5376 PMD_DRV_LOG(ERR, "Invalid pointer");
5380 switch (info->info_type) {
5381 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
5382 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
5384 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
5385 ret = i40e_set_hash_filter_global_config(hw,
5386 &(info->info.global_conf));
5389 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
5398 /* Operations for hash function */
5400 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
5401 enum rte_filter_op filter_op,
5404 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5407 switch (filter_op) {
5408 case RTE_ETH_FILTER_NOP:
5410 case RTE_ETH_FILTER_GET:
5411 ret = i40e_hash_filter_get(hw,
5412 (struct rte_eth_hash_filter_info *)arg);
5414 case RTE_ETH_FILTER_SET:
5415 ret = i40e_hash_filter_set(hw,
5416 (struct rte_eth_hash_filter_info *)arg);
5419 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
5429 * Configure an ethertype filter, which can direct packets by filtering
5430 * on MAC address and ether_type, or on ether_type only.
5433 i40e_ethertype_filter_set(struct i40e_pf *pf,
5434 struct rte_eth_ethertype_filter *filter,
5437 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5438 struct i40e_control_filter_stats stats;
5442 if (filter->queue >= pf->dev_data->nb_rx_queues) {
5443 PMD_DRV_LOG(ERR, "Invalid queue ID");
5446 if (filter->ether_type == ETHER_TYPE_IPv4 ||
5447 filter->ether_type == ETHER_TYPE_IPv6) {
5448 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
5449 " control packet filter.", filter->ether_type);
5452 if (filter->ether_type == ETHER_TYPE_VLAN)
5453 PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
5456 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5457 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5458 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5459 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5460 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5462 memset(&stats, 0, sizeof(stats));
5463 ret = i40e_aq_add_rem_control_packet_filter(hw,
5464 filter->mac_addr.addr_bytes,
5465 filter->ether_type, flags,
5467 filter->queue, add, &stats, NULL);
5469 PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
5470 " mac_etype_used = %u, etype_used = %u,"
5471 " mac_etype_free = %u, etype_free = %u\n",
5472 ret, stats.mac_etype_used, stats.etype_used,
5473 stats.mac_etype_free, stats.etype_free);
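/*
 * Illustrative application-side sketch, not part of this driver: an
 * ethertype filter installed through the generic filter_ctrl API. With
 * RTE_ETHTYPE_FLAGS_MAC left unset the MAC address is ignored and only
 * the ethertype is matched; 0x88CC (LLDP), queue 4 and port 0 are
 * arbitrary example values.
 */
#if 0
static void
example_add_ethertype_filter(void)
{
	struct rte_eth_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x88CC;
	filter.flags = 0;
	filter.queue = 4;

	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_ETHERTYPE,
				RTE_ETH_FILTER_ADD, &filter);
}
#endif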
5480 * Handle operations for ethertype filter.
5483 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
5484 enum rte_filter_op filter_op,
5487 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5490 if (filter_op == RTE_ETH_FILTER_NOP)
5494 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
5499 switch (filter_op) {
5500 case RTE_ETH_FILTER_ADD:
5501 ret = i40e_ethertype_filter_set(pf,
5502 (struct rte_eth_ethertype_filter *)arg,
5505 case RTE_ETH_FILTER_DELETE:
5506 ret = i40e_ethertype_filter_set(pf,
5507 (struct rte_eth_ethertype_filter *)arg,
5511 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
5519 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
5520 enum rte_filter_type filter_type,
5521 enum rte_filter_op filter_op,
5529 switch (filter_type) {
5530 case RTE_ETH_FILTER_HASH:
5531 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
5533 case RTE_ETH_FILTER_MACVLAN:
5534 ret = i40e_mac_filter_handle(dev, filter_op, arg);
5536 case RTE_ETH_FILTER_ETHERTYPE:
5537 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
5539 case RTE_ETH_FILTER_TUNNEL:
5540 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
5542 case RTE_ETH_FILTER_FDIR:
5543 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
5546 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5556 * As some registers are only reset by a global hardware reset,
5557 * hardware initialization is needed to put those registers into an
5558 * expected initial state.
5561 i40e_hw_init(struct i40e_hw *hw)
5563 /* clear the PF Queue Filter control register */
5564 I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);
5566 /* Disable symmetric hash per port */
5567 i40e_set_symmetric_hash_enable_per_port(hw, 0);
5570 enum i40e_filter_pctype
5571 i40e_flowtype_to_pctype(uint16_t flow_type)
5573 static const enum i40e_filter_pctype pctype_table[] = {
5574 [RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
5575 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
5576 I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
5577 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
5578 I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
5579 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
5580 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
5581 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
5582 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5583 [RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
5584 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
5585 I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
5586 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
5587 I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
5588 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
5589 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
5590 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
5591 I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
5592 [RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
5595 return pctype_table[flow_type];
5599 i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
5601 static const uint16_t flowtype_table[] = {
5602 [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
5603 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
5604 RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
5605 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
5606 RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
5607 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
5608 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
5609 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
5610 RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
5611 [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
5612 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
5613 RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
5614 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
5615 RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
5616 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
5617 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
5618 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
5619 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
5620 [I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
5623 return flowtype_table[pctype];
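/*
 * Illustrative sketch, not part of this driver: the two lookup tables
 * above are inverses of each other for the supported flow types, so a
 * flow type converted to a packet classifier type and back is unchanged.
 */
#if 0
static int
example_flowtype_roundtrip(uint16_t flow_type)
{
	enum i40e_filter_pctype pctype = i40e_flowtype_to_pctype(flow_type);

	return i40e_pctype_to_flowtype(pctype) == flow_type;
}
#endif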
5627 * On X710, performance numbers are far below expectations on recent firmware
5628 * versions. On XL710, performance numbers are also far below expectations on
5629 * recent firmware versions, if promiscuous mode is disabled, or if promiscuous
5630 * mode is enabled and the port MAC address equals the packet destination MAC
5631 * address. The fix for this issue may not be integrated into upcoming
5632 * firmware versions, so a workaround in the software driver is needed. It
5633 * modifies the initial values of 3 internal-only registers for both X710 and
5634 * XL710. Note that the values for X710 and XL710 could differ, and the
5635 * workaround can be removed once the issue is fixed in firmware.
5638 /* For both X710 and XL710 */
5639 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
5640 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
5642 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
5643 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
5646 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
5648 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
5649 #define I40E_GL_SWR_PM_UP_THR 0x269FBC
5652 i40e_configure_registers(struct i40e_hw *hw)
5658 {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
5659 {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
5660 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
5666 for (i = 0; i < RTE_DIM(reg_table); i++) {
5667 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
5668 if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
5670 I40E_GL_SWR_PM_UP_THR_SF_VALUE;
5673 I40E_GL_SWR_PM_UP_THR_EF_VALUE;
5676 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
5679 PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
5683 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
5684 reg_table[i].addr, reg);
5685 if (reg == reg_table[i].val)
5688 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
5689 reg_table[i].val, NULL);
5691 PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
5692 "address of 0x%"PRIx32, reg_table[i].val,
5696 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
5697 "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);