4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
52 #include "i40e_logs.h"
53 #include "i40e/i40e_register_x710_int.h"
54 #include "i40e/i40e_prototype.h"
55 #include "i40e/i40e_adminq_cmd.h"
56 #include "i40e/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
61 #define I40E_DEFAULT_RX_FREE_THRESH 32
62 #define I40E_DEFAULT_RX_PTHRESH 8
63 #define I40E_DEFAULT_RX_HTHRESH 8
64 #define I40E_DEFAULT_RX_WTHRESH 0
66 #define I40E_DEFAULT_TX_FREE_THRESH 32
67 #define I40E_DEFAULT_TX_PTHRESH 32
68 #define I40E_DEFAULT_TX_HTHRESH 0
69 #define I40E_DEFAULT_TX_WTHRESH 0
70 #define I40E_DEFAULT_TX_RSBIT_THRESH 32
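/*
 * The RX/TX PTHRESH/HTHRESH/WTHRESH values above are the descriptor ring
 * prefetch, host and write-back thresholds. They are only reported as
 * per-queue defaults through dev_infos_get() (see i40e_dev_info_get()
 * below); applications may override them at queue setup time.
 */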
72 /* Maximum number of MAC addresses */
73 #define I40E_NUM_MACADDR_MAX 64
74 #define I40E_CLEAR_PXE_WAIT_MS 200
76 /* Maximum number of capability elements */
77 #define I40E_MAX_CAP_ELE_NUM 128
79 /* Wait count and interval */
80 #define I40E_CHK_Q_ENA_COUNT 1000
81 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
83 /* Maximum number of VSIs */
84 #define I40E_MAX_NUM_VSIS (384UL)
86 /* Bit shift and mask */
87 #define I40E_16_BIT_SHIFT 16
88 #define I40E_16_BIT_MASK 0xFFFF
89 #define I40E_32_BIT_SHIFT 32
90 #define I40E_32_BIT_MASK 0xFFFFFFFF
91 #define I40E_48_BIT_SHIFT 48
92 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
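/*
 * The shift/mask values above are helpers for the statistics routines:
 * 48-bit counters are exposed by hardware as a low 32-bit register plus a
 * high register, and i40e_stat_update_48() combines the two halves and
 * masks the result with I40E_48_BIT_MASK.
 */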
94 /* Default queue interrupt throttling time in microseconds */
95 #define I40E_ITR_INDEX_DEFAULT 0
96 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
97 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
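/*
 * The ITR register counts in units of 2 us, so i40e_calc_itr_interval()
 * below presumably writes interval / 2; e.g. the 32 us default becomes a
 * hardware count of 16.
 */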
99 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
101 static int eth_i40e_dev_init(\
102 __attribute__((unused)) struct eth_driver *eth_drv,
103 struct rte_eth_dev *eth_dev);
104 static int i40e_dev_configure(struct rte_eth_dev *dev);
105 static int i40e_dev_start(struct rte_eth_dev *dev);
106 static void i40e_dev_stop(struct rte_eth_dev *dev);
107 static void i40e_dev_close(struct rte_eth_dev *dev);
108 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
109 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
110 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
111 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
112 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
113 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
114 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
115 struct rte_eth_stats *stats);
116 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
117 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
121 static void i40e_dev_info_get(struct rte_eth_dev *dev,
122 struct rte_eth_dev_info *dev_info);
123 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
126 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
127 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
128 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
131 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
132 static int i40e_dev_led_on(struct rte_eth_dev *dev);
133 static int i40e_dev_led_off(struct rte_eth_dev *dev);
134 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
135 struct rte_eth_fc_conf *fc_conf);
136 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
137 struct rte_eth_pfc_conf *pfc_conf);
138 static void i40e_macaddr_add(struct rte_eth_dev *dev,
139 struct ether_addr *mac_addr,
142 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
143 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
144 struct rte_eth_rss_reta *reta_conf);
145 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
146 struct rte_eth_rss_reta *reta_conf);
148 static int i40e_get_cap(struct i40e_hw *hw);
149 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
150 static int i40e_pf_setup(struct i40e_pf *pf);
151 static int i40e_vsi_init(struct i40e_vsi *vsi);
152 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
153 bool offset_loaded, uint64_t *offset, uint64_t *stat);
154 static void i40e_stat_update_48(struct i40e_hw *hw,
160 static void i40e_pf_config_irq0(struct i40e_hw *hw);
161 static void i40e_dev_interrupt_handler(
162 __rte_unused struct rte_intr_handle *handle, void *param);
163 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
164 uint32_t base, uint32_t num);
165 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
166 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
168 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
170 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
171 static int i40e_veb_release(struct i40e_veb *veb);
172 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
173 struct i40e_vsi *vsi);
174 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
175 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
176 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
177 struct i40e_macvlan_filter *mv_f,
179 struct ether_addr *addr);
180 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
181 struct i40e_macvlan_filter *mv_f,
184 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
185 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
186 struct rte_eth_rss_conf *rss_conf);
187 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
188 struct rte_eth_rss_conf *rss_conf);
189 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
190 enum rte_filter_type filter_type,
191 enum rte_filter_op filter_op,
194 /* Default hash key buffer for RSS */
195 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
197 static struct rte_pci_id pci_id_i40e_map[] = {
198 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
199 #include "rte_pci_dev_ids.h"
200 { .vendor_id = 0, /* sentinel */ },
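/*
 * rte_pci_dev_ids.h expands RTE_PCI_DEV_ID_DECL_I40E once per supported
 * i40e device ID, so the table above is generated at compile time and is
 * terminated by the all-zero sentinel entry.
 */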
203 static struct eth_dev_ops i40e_eth_dev_ops = {
204 .dev_configure = i40e_dev_configure,
205 .dev_start = i40e_dev_start,
206 .dev_stop = i40e_dev_stop,
207 .dev_close = i40e_dev_close,
208 .promiscuous_enable = i40e_dev_promiscuous_enable,
209 .promiscuous_disable = i40e_dev_promiscuous_disable,
210 .allmulticast_enable = i40e_dev_allmulticast_enable,
211 .allmulticast_disable = i40e_dev_allmulticast_disable,
212 .dev_set_link_up = i40e_dev_set_link_up,
213 .dev_set_link_down = i40e_dev_set_link_down,
214 .link_update = i40e_dev_link_update,
215 .stats_get = i40e_dev_stats_get,
216 .stats_reset = i40e_dev_stats_reset,
217 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
218 .dev_infos_get = i40e_dev_info_get,
219 .vlan_filter_set = i40e_vlan_filter_set,
220 .vlan_tpid_set = i40e_vlan_tpid_set,
221 .vlan_offload_set = i40e_vlan_offload_set,
222 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
223 .vlan_pvid_set = i40e_vlan_pvid_set,
224 .rx_queue_start = i40e_dev_rx_queue_start,
225 .rx_queue_stop = i40e_dev_rx_queue_stop,
226 .tx_queue_start = i40e_dev_tx_queue_start,
227 .tx_queue_stop = i40e_dev_tx_queue_stop,
228 .rx_queue_setup = i40e_dev_rx_queue_setup,
229 .rx_queue_release = i40e_dev_rx_queue_release,
230 .rx_queue_count = i40e_dev_rx_queue_count,
231 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
232 .tx_queue_setup = i40e_dev_tx_queue_setup,
233 .tx_queue_release = i40e_dev_tx_queue_release,
234 .dev_led_on = i40e_dev_led_on,
235 .dev_led_off = i40e_dev_led_off,
236 .flow_ctrl_set = i40e_flow_ctrl_set,
237 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
238 .mac_addr_add = i40e_macaddr_add,
239 .mac_addr_remove = i40e_macaddr_remove,
240 .reta_update = i40e_dev_rss_reta_update,
241 .reta_query = i40e_dev_rss_reta_query,
242 .rss_hash_update = i40e_dev_rss_hash_update,
243 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
244 .filter_ctrl = i40e_dev_filter_ctrl,
247 static struct eth_driver rte_i40e_pmd = {
249 .name = "rte_i40e_pmd",
250 .id_table = pci_id_i40e_map,
251 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
253 .eth_dev_init = eth_i40e_dev_init,
254 .dev_private_size = sizeof(struct i40e_adapter),
258 i40e_prev_power_of_2(int n)
276 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
277 struct rte_eth_link *link)
279 struct rte_eth_link *dst = link;
280 struct rte_eth_link *src = &(dev->data->dev_link);
282 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
283 *(uint64_t *)src) == 0)
290 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
291 struct rte_eth_link *link)
293 struct rte_eth_link *dst = &(dev->data->dev_link);
294 struct rte_eth_link *src = link;
296 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
297 *(uint64_t *)src) == 0)
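/*
 * Both helpers above rely on struct rte_eth_link fitting into 64 bits, so
 * the whole link status can be copied with a single rte_atomic64_cmpset();
 * if the cmpset returns 0 the link status changed concurrently and the
 * helper reports an error instead of a partial copy.
 */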
304 * Driver initialization routine.
305 * Invoked once at EAL init time.
306 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
309 rte_i40e_pmd_init(const char *name __rte_unused,
310 const char *params __rte_unused)
312 PMD_INIT_FUNC_TRACE();
313 rte_eth_driver_register(&rte_i40e_pmd);
318 static struct rte_driver rte_i40e_driver = {
320 .init = rte_i40e_pmd_init,
323 PMD_REGISTER_DRIVER(rte_i40e_driver);
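/*
 * PMD_REGISTER_DRIVER() places rte_i40e_driver on the EAL driver list from
 * a constructor, so rte_i40e_pmd_init() runs automatically during
 * rte_eal_init() and registers the PCI ethernet driver declared above.
 */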
326 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
327 struct rte_eth_dev *dev)
329 struct rte_pci_device *pci_dev;
330 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
331 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
332 struct i40e_vsi *vsi;
337 PMD_INIT_FUNC_TRACE();
339 dev->dev_ops = &i40e_eth_dev_ops;
340 dev->rx_pkt_burst = i40e_recv_pkts;
341 dev->tx_pkt_burst = i40e_xmit_pkts;
343 /* for secondary processes, we don't initialise any further as the primary
344 * has already done this work. Only check we don't need a different
346 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
347 if (dev->data->scattered_rx)
348 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
351 pci_dev = dev->pci_dev;
352 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
353 pf->adapter->eth_dev = dev;
354 pf->dev_data = dev->data;
356 hw->back = I40E_PF_TO_ADAPTER(pf);
357 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
359 PMD_INIT_LOG(ERR, "Hardware is not available, "
360 "as address is NULL");
364 hw->vendor_id = pci_dev->id.vendor_id;
365 hw->device_id = pci_dev->id.device_id;
366 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
367 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
368 hw->bus.device = pci_dev->addr.devid;
369 hw->bus.func = pci_dev->addr.function;
371 /* Make sure all is clean before doing PF reset */
374 /* Reset here to make sure all is clean for each PF */
375 ret = i40e_pf_reset(hw);
377 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
381 /* Initialize the shared code (base driver) */
382 ret = i40e_init_shared_code(hw);
384 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
388 /* Initialize the parameters for adminq */
389 i40e_init_adminq_parameter(hw);
390 ret = i40e_init_adminq(hw);
391 if (ret != I40E_SUCCESS) {
392 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
395 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
396 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
397 hw->aq.api_maj_ver, hw->aq.api_min_ver,
398 ((hw->nvm.version >> 12) & 0xf),
399 ((hw->nvm.version >> 4) & 0xff),
400 (hw->nvm.version & 0xf), hw->nvm.eetrack);
403 ret = i40e_aq_stop_lldp(hw, true, NULL);
404 if (ret != I40E_SUCCESS) /* Failure here can safely be ignored */
405 PMD_INIT_LOG(INFO, "Failed to stop lldp");
408 i40e_clear_pxe_mode(hw);
410 /* Get hw capabilities */
411 ret = i40e_get_cap(hw);
412 if (ret != I40E_SUCCESS) {
413 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
414 goto err_get_capabilities;
417 /* Initialize parameters for PF */
418 ret = i40e_pf_parameter_init(dev);
420 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
421 goto err_parameter_init;
424 /* Initialize the queue management */
425 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
427 PMD_INIT_LOG(ERR, "Failed to init queue pool");
428 goto err_qp_pool_init;
430 ret = i40e_res_pool_init(&pf->msix_pool, 1,
431 hw->func_caps.num_msix_vectors - 1);
433 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
434 goto err_msix_pool_init;
437 /* Initialize lan hmc */
438 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
439 hw->func_caps.num_rx_qp, 0, 0);
440 if (ret != I40E_SUCCESS) {
441 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
442 goto err_init_lan_hmc;
445 /* Configure lan hmc */
446 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
447 if (ret != I40E_SUCCESS) {
448 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
449 goto err_configure_lan_hmc;
452 /* Get and check the mac address */
453 i40e_get_mac_addr(hw, hw->mac.addr);
454 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
455 PMD_INIT_LOG(ERR, "mac address is not valid");
457 goto err_get_mac_addr;
459 /* Copy the permanent MAC address */
460 ether_addr_copy((struct ether_addr *) hw->mac.addr,
461 (struct ether_addr *) hw->mac.perm_addr);
463 /* Disable flow control */
464 hw->fc.requested_mode = I40E_FC_NONE;
465 i40e_set_fc(hw, &aq_fail, TRUE);
467 /* PF setup, which includes VSI setup */
468 ret = i40e_pf_setup(pf);
470 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
471 goto err_setup_pf_switch;
476 /* Disable double vlan by default */
477 i40e_vsi_config_double_vlan(vsi, FALSE);
479 if (!vsi->max_macaddrs)
480 len = ETHER_ADDR_LEN;
482 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
484 /* This must be done after the VSI has been initialized */
485 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
486 if (!dev->data->mac_addrs) {
487 PMD_INIT_LOG(ERR, "Failed to allocate memory "
488 "for storing mac address");
489 goto err_get_mac_addr;
491 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
492 &dev->data->mac_addrs[0]);
494 /* initialize pf host driver to setup SRIOV resource if applicable */
495 i40e_pf_host_init(dev);
497 /* register callback func to eal lib */
498 rte_intr_callback_register(&(pci_dev->intr_handle),
499 i40e_dev_interrupt_handler, (void *)dev);
501 /* configure and enable device interrupt */
502 i40e_pf_config_irq0(hw);
503 i40e_pf_enable_irq0(hw);
505 /* enable uio intr after callback register */
506 rte_intr_enable(&(pci_dev->intr_handle));
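/*
 * The error labels below unwind the steps above in reverse order: the PF
 * switch setup, the LAN HMC, the MSIX and queue-pair resource pools and
 * finally the admin queue.
 */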
511 rte_free(pf->main_vsi);
513 err_configure_lan_hmc:
514 (void)i40e_shutdown_lan_hmc(hw);
516 i40e_res_pool_destroy(&pf->msix_pool);
518 i40e_res_pool_destroy(&pf->qp_pool);
521 err_get_capabilities:
522 (void)i40e_shutdown_adminq(hw);
528 i40e_dev_configure(struct rte_eth_dev *dev)
530 return i40e_dev_init_vlan(dev);
534 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
536 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
537 uint16_t msix_vect = vsi->msix_intr;
540 for (i = 0; i < vsi->nb_qps; i++) {
541 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
542 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
546 if (vsi->type != I40E_VSI_SRIOV) {
547 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
548 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
552 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
553 vsi->user_param + (msix_vect - 1);
555 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
557 I40E_WRITE_FLUSH(hw);
560 static inline uint16_t
561 i40e_calc_itr_interval(int16_t interval)
563 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
564 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
566 /* Convert to hardware count, as writing each 1 represents 2 us */
571 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
574 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
575 uint16_t msix_vect = vsi->msix_intr;
576 uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
579 for (i = 0; i < vsi->nb_qps; i++)
580 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
582 /* Bind all RX queues to allocated MSIX interrupt */
583 for (i = 0; i < vsi->nb_qps; i++) {
584 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
585 (interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
586 ((vsi->base_queue + i + 1) <<
587 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
588 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
589 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
591 if (i == vsi->nb_qps - 1)
592 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
593 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
596 /* Write first RX queue to Link list register as the head element */
597 if (vsi->type != I40E_VSI_SRIOV) {
598 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
599 (vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
600 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
602 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
603 msix_vect - 1), interval);
605 /* Disable auto-masking for all non-zero interrupts */
606 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
607 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
611 /* num_msix_vectors_vf includes irq0, which must be subtracted */
612 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
613 vsi->user_param + (msix_vect - 1);
615 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
616 (vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
617 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
620 I40E_WRITE_FLUSH(hw);
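/*
 * The binding above builds a linked list of RX queues per MSIX vector:
 * each QINT_RQCTL entry points at the next queue, the last entry is marked
 * as end-of-list via NEXTQ_INDX_MASK, and PFINT_LNKLSTN (or VPINT_LNKLSTN
 * for an SRIOV VSI) holds the head of the list.
 */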
624 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
626 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
627 uint16_t interval = i40e_calc_itr_interval(\
628 RTE_LIBRTE_I40E_ITR_INTERVAL);
630 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
631 I40E_PFINT_DYN_CTLN_INTENA_MASK |
632 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
633 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
634 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
638 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
640 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
642 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
645 static inline uint8_t
646 i40e_parse_link_speed(uint16_t eth_link_speed)
648 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
650 switch (eth_link_speed) {
651 case ETH_LINK_SPEED_40G:
652 link_speed = I40E_LINK_SPEED_40GB;
654 case ETH_LINK_SPEED_20G:
655 link_speed = I40E_LINK_SPEED_20GB;
657 case ETH_LINK_SPEED_10G:
658 link_speed = I40E_LINK_SPEED_10GB;
660 case ETH_LINK_SPEED_1000:
661 link_speed = I40E_LINK_SPEED_1GB;
663 case ETH_LINK_SPEED_100:
664 link_speed = I40E_LINK_SPEED_100MB;
672 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
674 enum i40e_status_code status;
675 struct i40e_aq_get_phy_abilities_resp phy_ab;
676 struct i40e_aq_set_phy_config phy_conf;
677 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
678 I40E_AQ_PHY_FLAG_PAUSE_RX |
679 I40E_AQ_PHY_FLAG_LOW_POWER;
680 const uint8_t advt = I40E_LINK_SPEED_40GB |
681 I40E_LINK_SPEED_10GB |
682 I40E_LINK_SPEED_1GB |
683 I40E_LINK_SPEED_100MB;
686 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
691 memset(&phy_conf, 0, sizeof(phy_conf));
693 /* bits 0-2 use the values from get_phy_abilities_resp */
695 abilities |= phy_ab.abilities & mask;
697 /* update abilities and speed */
698 if (abilities & I40E_AQ_PHY_AN_ENABLED)
699 phy_conf.link_speed = advt;
701 phy_conf.link_speed = force_speed;
703 phy_conf.abilities = abilities;
705 /* use get_phy_abilities_resp value for the rest */
706 phy_conf.phy_type = phy_ab.phy_type;
707 phy_conf.eee_capability = phy_ab.eee_capability;
708 phy_conf.eeer = phy_ab.eeer_val;
709 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
711 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
712 phy_ab.abilities, phy_ab.link_speed);
713 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
714 phy_conf.abilities, phy_conf.link_speed);
716 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
724 i40e_apply_link_speed(struct rte_eth_dev *dev)
727 uint8_t abilities = 0;
728 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
729 struct rte_eth_conf *conf = &dev->data->dev_conf;
731 speed = i40e_parse_link_speed(conf->link_speed);
732 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
733 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
734 abilities |= I40E_AQ_PHY_AN_ENABLED;
736 abilities |= I40E_AQ_PHY_LINK_ENABLED;
738 return i40e_phy_conf_link(hw, abilities, speed);
742 i40e_dev_start(struct rte_eth_dev *dev)
744 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
745 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
746 struct i40e_vsi *vsi = pf->main_vsi;
749 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
750 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
751 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
752 dev->data->dev_conf.link_duplex,
758 ret = i40e_vsi_init(vsi);
759 if (ret != I40E_SUCCESS) {
760 PMD_DRV_LOG(ERR, "Failed to init VSI");
764 /* Map queues with MSIX interrupt */
765 i40e_vsi_queues_bind_intr(vsi);
766 i40e_vsi_enable_queues_intr(vsi);
768 /* Enable all queues which have been configured */
769 ret = i40e_vsi_switch_queues(vsi, TRUE);
770 if (ret != I40E_SUCCESS) {
771 PMD_DRV_LOG(ERR, "Failed to enable VSI");
775 /* Enable receiving broadcast packets */
776 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
777 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
778 if (ret != I40E_SUCCESS)
779 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
782 /* Apply link configuration */
783 ret = i40e_apply_link_speed(dev);
784 if (I40E_SUCCESS != ret) {
785 PMD_DRV_LOG(ERR, "Failed to apply link settings");
792 i40e_vsi_switch_queues(vsi, FALSE);
798 i40e_dev_stop(struct rte_eth_dev *dev)
800 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
801 struct i40e_vsi *vsi = pf->main_vsi;
803 /* Disable all queues */
804 i40e_vsi_switch_queues(vsi, FALSE);
807 i40e_dev_set_link_down(dev);
809 /* Unmap queues from interrupt registers */
810 i40e_vsi_disable_queues_intr(vsi);
811 i40e_vsi_queues_unbind_intr(vsi);
815 i40e_dev_close(struct rte_eth_dev *dev)
817 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
818 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
821 PMD_INIT_FUNC_TRACE();
825 /* Disable interrupt */
826 i40e_pf_disable_irq0(hw);
827 rte_intr_disable(&(dev->pci_dev->intr_handle));
829 /* shutdown and destroy the HMC */
830 i40e_shutdown_lan_hmc(hw);
832 /* release all the existing VSIs and VEBs */
833 i40e_vsi_release(pf->main_vsi);
835 /* shutdown the adminq */
836 i40e_aq_queue_shutdown(hw, true);
837 i40e_shutdown_adminq(hw);
839 i40e_res_pool_destroy(&pf->qp_pool);
840 i40e_res_pool_destroy(&pf->msix_pool);
842 /* force a PF reset to clean anything leftover */
843 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
844 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
845 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
846 I40E_WRITE_FLUSH(hw);
850 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
852 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
853 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
854 struct i40e_vsi *vsi = pf->main_vsi;
857 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
859 if (status != I40E_SUCCESS)
860 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
862 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
864 if (status != I40E_SUCCESS)
865 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
870 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
872 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
873 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
874 struct i40e_vsi *vsi = pf->main_vsi;
877 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
879 if (status != I40E_SUCCESS)
880 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
882 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
884 if (status != I40E_SUCCESS)
885 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
889 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
891 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
892 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
893 struct i40e_vsi *vsi = pf->main_vsi;
896 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
897 if (ret != I40E_SUCCESS)
898 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
902 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
904 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
905 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
906 struct i40e_vsi *vsi = pf->main_vsi;
909 if (dev->data->promiscuous == 1)
910 return; /* must remain in all_multicast mode */
912 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
913 vsi->seid, FALSE, NULL);
914 if (ret != I40E_SUCCESS)
915 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
919 * Set device link up.
922 i40e_dev_set_link_up(struct rte_eth_dev *dev)
924 /* re-apply link speed setting */
925 return i40e_apply_link_speed(dev);
929 * Set device link down.
932 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
934 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
935 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
936 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
938 return i40e_phy_conf_link(hw, abilities, speed);
942 i40e_dev_link_update(struct rte_eth_dev *dev,
943 __rte_unused int wait_to_complete)
945 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
946 struct i40e_link_status link_status;
947 struct rte_eth_link link, old;
950 memset(&link, 0, sizeof(link));
951 memset(&old, 0, sizeof(old));
952 memset(&link_status, 0, sizeof(link_status));
953 rte_i40e_dev_atomic_read_link_status(dev, &old);
955 /* Get link status information from hardware */
956 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
957 if (status != I40E_SUCCESS) {
958 link.link_speed = ETH_LINK_SPEED_100;
959 link.link_duplex = ETH_LINK_FULL_DUPLEX;
960 PMD_DRV_LOG(ERR, "Failed to get link info");
964 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
966 if (!link.link_status)
969 /* i40e uses full duplex only */
970 link.link_duplex = ETH_LINK_FULL_DUPLEX;
972 /* Parse the link status */
973 switch (link_status.link_speed) {
974 case I40E_LINK_SPEED_100MB:
975 link.link_speed = ETH_LINK_SPEED_100;
977 case I40E_LINK_SPEED_1GB:
978 link.link_speed = ETH_LINK_SPEED_1000;
980 case I40E_LINK_SPEED_10GB:
981 link.link_speed = ETH_LINK_SPEED_10G;
983 case I40E_LINK_SPEED_20GB:
984 link.link_speed = ETH_LINK_SPEED_20G;
986 case I40E_LINK_SPEED_40GB:
987 link.link_speed = ETH_LINK_SPEED_40G;
990 link.link_speed = ETH_LINK_SPEED_100;
995 rte_i40e_dev_atomic_write_link_status(dev, &link);
996 if (link.link_status == old.link_status)
1002 /* Get all the statistics of a VSI */
1004 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1006 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1007 struct i40e_eth_stats *nes = &vsi->eth_stats;
1008 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1009 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1011 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1012 vsi->offset_loaded, &oes->rx_bytes,
1014 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1015 vsi->offset_loaded, &oes->rx_unicast,
1017 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1018 vsi->offset_loaded, &oes->rx_multicast,
1019 &nes->rx_multicast);
1020 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1021 vsi->offset_loaded, &oes->rx_broadcast,
1022 &nes->rx_broadcast);
1023 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1024 &oes->rx_discards, &nes->rx_discards);
1025 /* GLV_REPC not supported */
1026 /* GLV_RMPC not supported */
1027 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1028 &oes->rx_unknown_protocol,
1029 &nes->rx_unknown_protocol);
1030 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1031 vsi->offset_loaded, &oes->tx_bytes,
1033 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1034 vsi->offset_loaded, &oes->tx_unicast,
1036 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1037 vsi->offset_loaded, &oes->tx_multicast,
1038 &nes->tx_multicast);
1039 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1040 vsi->offset_loaded, &oes->tx_broadcast,
1041 &nes->tx_broadcast);
1042 /* GLV_TDPC not supported */
1043 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1044 &oes->tx_errors, &nes->tx_errors);
1045 vsi->offset_loaded = true;
1047 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1049 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
1050 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
1051 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
1052 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
1053 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
1054 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1055 nes->rx_unknown_protocol);
1056 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
1057 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
1058 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
1059 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
1060 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
1061 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
1062 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1066 /* Get all statistics of a port */
1068 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1071 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1072 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1073 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1074 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1076 /* Get statistics of struct i40e_eth_stats */
1077 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1078 I40E_GLPRT_GORCL(hw->port),
1079 pf->offset_loaded, &os->eth.rx_bytes,
1081 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1082 I40E_GLPRT_UPRCL(hw->port),
1083 pf->offset_loaded, &os->eth.rx_unicast,
1084 &ns->eth.rx_unicast);
1085 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1086 I40E_GLPRT_MPRCL(hw->port),
1087 pf->offset_loaded, &os->eth.rx_multicast,
1088 &ns->eth.rx_multicast);
1089 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1090 I40E_GLPRT_BPRCL(hw->port),
1091 pf->offset_loaded, &os->eth.rx_broadcast,
1092 &ns->eth.rx_broadcast);
1093 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1094 pf->offset_loaded, &os->eth.rx_discards,
1095 &ns->eth.rx_discards);
1096 /* GLPRT_REPC not supported */
1097 /* GLPRT_RMPC not supported */
1098 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1100 &os->eth.rx_unknown_protocol,
1101 &ns->eth.rx_unknown_protocol);
1102 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1103 I40E_GLPRT_GOTCL(hw->port),
1104 pf->offset_loaded, &os->eth.tx_bytes,
1106 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1107 I40E_GLPRT_UPTCL(hw->port),
1108 pf->offset_loaded, &os->eth.tx_unicast,
1109 &ns->eth.tx_unicast);
1110 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1111 I40E_GLPRT_MPTCL(hw->port),
1112 pf->offset_loaded, &os->eth.tx_multicast,
1113 &ns->eth.tx_multicast);
1114 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1115 I40E_GLPRT_BPTCL(hw->port),
1116 pf->offset_loaded, &os->eth.tx_broadcast,
1117 &ns->eth.tx_broadcast);
1118 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1119 pf->offset_loaded, &os->eth.tx_discards,
1120 &ns->eth.tx_discards);
1121 /* GLPRT_TEPC not supported */
1123 /* additional port specific stats */
1124 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1125 pf->offset_loaded, &os->tx_dropped_link_down,
1126 &ns->tx_dropped_link_down);
1127 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1128 pf->offset_loaded, &os->crc_errors,
1130 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1131 pf->offset_loaded, &os->illegal_bytes,
1132 &ns->illegal_bytes);
1133 /* GLPRT_ERRBC not supported */
1134 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1135 pf->offset_loaded, &os->mac_local_faults,
1136 &ns->mac_local_faults);
1137 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1138 pf->offset_loaded, &os->mac_remote_faults,
1139 &ns->mac_remote_faults);
1140 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1141 pf->offset_loaded, &os->rx_length_errors,
1142 &ns->rx_length_errors);
1143 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1144 pf->offset_loaded, &os->link_xon_rx,
1146 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1147 pf->offset_loaded, &os->link_xoff_rx,
1149 for (i = 0; i < 8; i++) {
1150 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1152 &os->priority_xon_rx[i],
1153 &ns->priority_xon_rx[i]);
1154 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1156 &os->priority_xoff_rx[i],
1157 &ns->priority_xoff_rx[i]);
1159 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1160 pf->offset_loaded, &os->link_xon_tx,
1162 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1163 pf->offset_loaded, &os->link_xoff_tx,
1165 for (i = 0; i < 8; i++) {
1166 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1168 &os->priority_xon_tx[i],
1169 &ns->priority_xon_tx[i]);
1170 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1172 &os->priority_xoff_tx[i],
1173 &ns->priority_xoff_tx[i]);
1174 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1176 &os->priority_xon_2_xoff[i],
1177 &ns->priority_xon_2_xoff[i]);
1179 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1180 I40E_GLPRT_PRC64L(hw->port),
1181 pf->offset_loaded, &os->rx_size_64,
1183 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1184 I40E_GLPRT_PRC127L(hw->port),
1185 pf->offset_loaded, &os->rx_size_127,
1187 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1188 I40E_GLPRT_PRC255L(hw->port),
1189 pf->offset_loaded, &os->rx_size_255,
1191 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1192 I40E_GLPRT_PRC511L(hw->port),
1193 pf->offset_loaded, &os->rx_size_511,
1195 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1196 I40E_GLPRT_PRC1023L(hw->port),
1197 pf->offset_loaded, &os->rx_size_1023,
1199 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1200 I40E_GLPRT_PRC1522L(hw->port),
1201 pf->offset_loaded, &os->rx_size_1522,
1203 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1204 I40E_GLPRT_PRC9522L(hw->port),
1205 pf->offset_loaded, &os->rx_size_big,
1207 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1208 pf->offset_loaded, &os->rx_undersize,
1210 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1211 pf->offset_loaded, &os->rx_fragments,
1213 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1214 pf->offset_loaded, &os->rx_oversize,
1216 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1217 pf->offset_loaded, &os->rx_jabber,
1219 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1220 I40E_GLPRT_PTC64L(hw->port),
1221 pf->offset_loaded, &os->tx_size_64,
1223 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1224 I40E_GLPRT_PTC127L(hw->port),
1225 pf->offset_loaded, &os->tx_size_127,
1227 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1228 I40E_GLPRT_PTC255L(hw->port),
1229 pf->offset_loaded, &os->tx_size_255,
1231 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1232 I40E_GLPRT_PTC511L(hw->port),
1233 pf->offset_loaded, &os->tx_size_511,
1235 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1236 I40E_GLPRT_PTC1023L(hw->port),
1237 pf->offset_loaded, &os->tx_size_1023,
1239 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1240 I40E_GLPRT_PTC1522L(hw->port),
1241 pf->offset_loaded, &os->tx_size_1522,
1243 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1244 I40E_GLPRT_PTC9522L(hw->port),
1245 pf->offset_loaded, &os->tx_size_big,
1247 /* GLPRT_MSPDC not supported */
1248 /* GLPRT_XEC not supported */
1250 pf->offset_loaded = true;
1253 i40e_update_vsi_stats(pf->main_vsi);
1255 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1256 ns->eth.rx_broadcast;
1257 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1258 ns->eth.tx_broadcast;
1259 stats->ibytes = ns->eth.rx_bytes;
1260 stats->obytes = ns->eth.tx_bytes;
1261 stats->oerrors = ns->eth.tx_errors;
1262 stats->imcasts = ns->eth.rx_multicast;
1265 stats->ibadcrc = ns->crc_errors;
1266 stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
1267 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1268 stats->imissed = ns->eth.rx_discards;
1269 stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
1271 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1272 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
1273 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
1274 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
1275 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
1276 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
1277 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1278 ns->eth.rx_unknown_protocol);
1279 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
1280 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
1281 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
1282 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
1283 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
1284 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
1286 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
1287 ns->tx_dropped_link_down);
1288 PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
1289 PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
1291 PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
1292 PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
1293 ns->mac_local_faults);
1294 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
1295 ns->mac_remote_faults);
1296 PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
1297 ns->rx_length_errors);
1298 PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
1299 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
1300 for (i = 0; i < 8; i++) {
1301 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
1302 i, ns->priority_xon_rx[i]);
1303 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
1304 i, ns->priority_xoff_rx[i]);
1306 PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
1307 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
1308 for (i = 0; i < 8; i++) {
1309 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
1310 i, ns->priority_xon_tx[i]);
1311 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
1312 i, ns->priority_xoff_tx[i]);
1313 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
1314 i, ns->priority_xon_2_xoff[i]);
1316 PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
1317 PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
1318 PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
1319 PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
1320 PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
1321 PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
1322 PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
1323 PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
1324 PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
1325 PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
1326 PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
1327 PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
1328 PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
1329 PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
1330 PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
1331 PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
1332 PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
1333 PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
1334 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
1335 ns->mac_short_packet_dropped);
1336 PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
1337 ns->checksum_error);
1338 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1341 /* Reset the statistics */
1343 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1345 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1347 /* This causes the start point of each counter to be reloaded on the next stats read */
1348 pf->offset_loaded = false;
1352 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1353 __rte_unused uint16_t queue_id,
1354 __rte_unused uint8_t stat_idx,
1355 __rte_unused uint8_t is_rx)
1357 PMD_INIT_FUNC_TRACE();
1363 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1365 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1366 struct i40e_vsi *vsi = pf->main_vsi;
1368 dev_info->max_rx_queues = vsi->nb_qps;
1369 dev_info->max_tx_queues = vsi->nb_qps;
1370 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1371 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1372 dev_info->max_mac_addrs = vsi->max_macaddrs;
1373 dev_info->max_vfs = dev->pci_dev->max_vfs;
1374 dev_info->rx_offload_capa =
1375 DEV_RX_OFFLOAD_VLAN_STRIP |
1376 DEV_RX_OFFLOAD_IPV4_CKSUM |
1377 DEV_RX_OFFLOAD_UDP_CKSUM |
1378 DEV_RX_OFFLOAD_TCP_CKSUM;
1379 dev_info->tx_offload_capa =
1380 DEV_TX_OFFLOAD_VLAN_INSERT |
1381 DEV_TX_OFFLOAD_IPV4_CKSUM |
1382 DEV_TX_OFFLOAD_UDP_CKSUM |
1383 DEV_TX_OFFLOAD_TCP_CKSUM |
1384 DEV_TX_OFFLOAD_SCTP_CKSUM;
1386 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1388 .pthresh = I40E_DEFAULT_RX_PTHRESH,
1389 .hthresh = I40E_DEFAULT_RX_HTHRESH,
1390 .wthresh = I40E_DEFAULT_RX_WTHRESH,
1392 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1396 dev_info->default_txconf = (struct rte_eth_txconf) {
1398 .pthresh = I40E_DEFAULT_TX_PTHRESH,
1399 .hthresh = I40E_DEFAULT_TX_HTHRESH,
1400 .wthresh = I40E_DEFAULT_TX_WTHRESH,
1402 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1403 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1404 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
1410 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1412 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1413 struct i40e_vsi *vsi = pf->main_vsi;
1414 PMD_INIT_FUNC_TRACE();
1417 return i40e_vsi_add_vlan(vsi, vlan_id);
1419 return i40e_vsi_delete_vlan(vsi, vlan_id);
1423 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1424 __rte_unused uint16_t tpid)
1426 PMD_INIT_FUNC_TRACE();
1430 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1432 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1433 struct i40e_vsi *vsi = pf->main_vsi;
1435 if (mask & ETH_VLAN_STRIP_MASK) {
1436 /* Enable or disable VLAN stripping */
1437 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1438 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1440 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1443 if (mask & ETH_VLAN_EXTEND_MASK) {
1444 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1445 i40e_vsi_config_double_vlan(vsi, TRUE);
1447 i40e_vsi_config_double_vlan(vsi, FALSE);
1452 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1453 __rte_unused uint16_t queue,
1454 __rte_unused int on)
1456 PMD_INIT_FUNC_TRACE();
1460 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1462 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1463 struct i40e_vsi *vsi = pf->main_vsi;
1464 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1465 struct i40e_vsi_vlan_pvid_info info;
1467 memset(&info, 0, sizeof(info));
1470 info.config.pvid = pvid;
1472 info.config.reject.tagged =
1473 data->dev_conf.txmode.hw_vlan_reject_tagged;
1474 info.config.reject.untagged =
1475 data->dev_conf.txmode.hw_vlan_reject_untagged;
1478 return i40e_vsi_vlan_pvid_set(vsi, &info);
1482 i40e_dev_led_on(struct rte_eth_dev *dev)
1484 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1485 uint32_t mode = i40e_led_get(hw);
1488 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
1494 i40e_dev_led_off(struct rte_eth_dev *dev)
1496 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1497 uint32_t mode = i40e_led_get(hw);
1500 i40e_led_set(hw, 0, false);
1506 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1507 __rte_unused struct rte_eth_fc_conf *fc_conf)
1509 PMD_INIT_FUNC_TRACE();
1515 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1516 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1518 PMD_INIT_FUNC_TRACE();
1523 /* Add a MAC address, and update filters */
1525 i40e_macaddr_add(struct rte_eth_dev *dev,
1526 struct ether_addr *mac_addr,
1527 __attribute__((unused)) uint32_t index,
1528 __attribute__((unused)) uint32_t pool)
1530 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1531 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1532 struct i40e_vsi *vsi = pf->main_vsi;
1533 struct ether_addr old_mac;
1536 if (!is_valid_assigned_ether_addr(mac_addr)) {
1537 PMD_DRV_LOG(ERR, "Invalid ethernet address");
1541 if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1542 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
1546 /* Write mac address */
1547 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1548 mac_addr->addr_bytes, NULL);
1549 if (ret != I40E_SUCCESS) {
1550 PMD_DRV_LOG(ERR, "Failed to write mac address");
1554 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1555 (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1558 ret = i40e_vsi_add_mac(vsi, mac_addr);
1559 if (ret != I40E_SUCCESS) {
1560 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1564 ether_addr_copy(mac_addr, &pf->dev_addr);
1565 i40e_vsi_delete_mac(vsi, &old_mac);
1568 /* Remove a MAC address, and update filters */
1570 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1572 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1573 struct i40e_vsi *vsi = pf->main_vsi;
1574 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1575 struct ether_addr *macaddr;
1577 struct i40e_hw *hw =
1578 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1580 if (index >= vsi->max_macaddrs)
1583 macaddr = &(data->mac_addrs[index]);
1584 if (!is_valid_assigned_ether_addr(macaddr))
1587 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1588 hw->mac.perm_addr, NULL);
1589 if (ret != I40E_SUCCESS) {
1590 PMD_DRV_LOG(ERR, "Failed to write mac address");
1594 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
1596 ret = i40e_vsi_delete_mac(vsi, macaddr);
1597 if (ret != I40E_SUCCESS)
1600 /* Clear device address as it has been removed */
1601 if (is_same_ether_addr(&(pf->dev_addr), macaddr))
1602 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1606 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1607 struct rte_eth_rss_reta *reta_conf)
1609 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1611 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1613 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1615 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1617 mask = (uint8_t)((reta_conf->mask_hi >>
1626 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1628 for (j = 0, lut = 0; j < 4; j++) {
1629 if (mask & (0x1 << j))
1630 lut |= reta_conf->reta[i + j] << (8 * j);
1632 lut |= l & (0xFF << (8 * j));
1634 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
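/*
 * The RSS lookup table packs four 8-bit queue indexes into each 32-bit
 * PFQF_HLUT register, so entry i lives in register i >> 2 at byte lane
 * i & 3; the mask bits select which lanes are rewritten, while the rest
 * keep the value read back into 'l' above.
 */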
1641 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1642 struct rte_eth_rss_reta *reta_conf)
1644 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1646 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1648 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1650 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1652 mask = (uint8_t)((reta_conf->mask_hi >>
1658 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1659 for (j = 0; j < 4; j++) {
1660 if (mask & (0x1 << j))
1661 reta_conf->reta[i + j] =
1662 (uint8_t)((lut >> (8 * j)) & 0xFF);
1670 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1671 * @hw: pointer to the HW structure
1672 * @mem: pointer to mem struct to fill out
1673 * @size: size of memory requested
1674 * @alignment: what to align the allocation to
1676 enum i40e_status_code
1677 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1678 struct i40e_dma_mem *mem,
1682 static uint64_t id = 0;
1683 const struct rte_memzone *mz = NULL;
1684 char z_name[RTE_MEMZONE_NAMESIZE];
1687 return I40E_ERR_PARAM;
1690 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1691 #ifdef RTE_LIBRTE_XEN_DOM0
1692 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1695 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1698 return I40E_ERR_NO_MEMORY;
1703 #ifdef RTE_LIBRTE_XEN_DOM0
1704 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1706 mem->pa = mz->phys_addr;
1709 return I40E_SUCCESS;
1713 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1714 * @hw: pointer to the HW structure
1715 * @mem: ptr to mem struct to free
1717 enum i40e_status_code
1718 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1719 struct i40e_dma_mem *mem)
1721 if (!mem || !mem->va)
1722 return I40E_ERR_PARAM;
1727 return I40E_SUCCESS;
1731 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1732 * @hw: pointer to the HW structure
1733 * @mem: pointer to mem struct to fill out
1734 * @size: size of memory requested
1736 enum i40e_status_code
1737 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1738 struct i40e_virt_mem *mem,
1742 return I40E_ERR_PARAM;
1745 mem->va = rte_zmalloc("i40e", size, 0);
1748 return I40E_SUCCESS;
1750 return I40E_ERR_NO_MEMORY;
1754 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1755 * @hw: pointer to the HW structure
1756 * @mem: pointer to mem struct to free
1758 enum i40e_status_code
1759 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1760 struct i40e_virt_mem *mem)
1763 return I40E_ERR_PARAM;
1768 return I40E_SUCCESS;
1772 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1774 rte_spinlock_init(&sp->spinlock);
1778 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1780 rte_spinlock_lock(&sp->spinlock);
1784 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1786 rte_spinlock_unlock(&sp->spinlock);
1790 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1796 * Get the hardware capabilities, which will be parsed
1797 * and saved into struct i40e_hw.
1800 i40e_get_cap(struct i40e_hw *hw)
1802 struct i40e_aqc_list_capabilities_element_resp *buf;
1803 uint16_t len, size = 0;
1806 /* Calculate a buffer large enough to hold the response data temporarily */
1807 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1808 I40E_MAX_CAP_ELE_NUM;
1809 buf = rte_zmalloc("i40e", len, 0);
1811 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1812 return I40E_ERR_NO_MEMORY;
1815 /* Get and parse the capabilities, then save them to hw */
1816 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1817 i40e_aqc_opc_list_func_capabilities, NULL);
1818 if (ret != I40E_SUCCESS)
1819 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1821 /* Free the temporary buffer after being used */
1828 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1830 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1831 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1832 uint16_t sum_queues = 0, sum_vsis;
1834 /* First check if FW supports SR-IOV */
1835 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1836 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
1840 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
1841 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
1842 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
1843 /* Allocate queues for pf */
1844 if (hw->func_caps.rss) {
1845 pf->flags |= I40E_FLAG_RSS;
1846 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
1847 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
1848 pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
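/*
 * lan_nb_qps is capped by the RSS table width and then rounded down;
 * i40e_prev_power_of_2() is assumed to return the largest power of two not
 * greater than its argument, which keeps the RSS distribution across the
 * LAN queues even.
 */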
1851 sum_queues = pf->lan_nb_qps;
1852 /* The default VSI is not counted here */
1854 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
1856 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
1857 pf->flags |= I40E_FLAG_SRIOV;
1858 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
1859 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
1860 PMD_INIT_LOG(ERR, "Config VF number %u, "
1861 "max supported %u.",
1862 dev->pci_dev->max_vfs,
1863 hw->func_caps.num_vfs);
1866 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
1867 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
1868 "max support %u queues.",
1869 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
1872 pf->vf_num = dev->pci_dev->max_vfs;
1873 sum_queues += pf->vf_nb_qps * pf->vf_num;
1874 sum_vsis += pf->vf_num;
1875 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
1876 pf->vf_num, pf->vf_nb_qps);
1880 if (hw->func_caps.vmdq) {
1881 pf->flags |= I40E_FLAG_VMDQ;
1882 pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
1883 sum_queues += pf->vmdq_nb_qps;
1885 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
1888 if (hw->func_caps.fd) {
1889 pf->flags |= I40E_FLAG_FDIR;
1890 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
1892 * Each flow director consumes one VSI and one queue,
1893 * but this cannot be calculated predictably here.
1897 if (sum_vsis > pf->max_num_vsi ||
1898 sum_queues > hw->func_caps.num_rx_qp) {
1899 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
1900 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
1901 pf->max_num_vsi, sum_vsis);
1902 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
1903 hw->func_caps.num_rx_qp, sum_queues);
1907 /* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for misc intr
1909 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
1910 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
1911 sum_vsis, hw->func_caps.num_msix_vectors);
1914 return I40E_SUCCESS;
1918 i40e_pf_get_switch_config(struct i40e_pf *pf)
1920 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1921 struct i40e_aqc_get_switch_config_resp *switch_config;
1922 struct i40e_aqc_switch_config_element_resp *element;
1923 uint16_t start_seid = 0, num_reported;
1926 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
1927 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
1928 if (!switch_config) {
1929 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1933 /* Get the switch configurations */
1934 ret = i40e_aq_get_switch_config(hw, switch_config,
1935 I40E_AQ_LARGE_BUF, &start_seid, NULL);
1936 if (ret != I40E_SUCCESS) {
1937 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
1940 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
1941 if (num_reported != 1) { /* The number should be 1 */
1942 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
1946 /* Parse the switch configuration elements */
1947 element = &(switch_config->element[0]);
1948 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
1949 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
1950 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
1952 PMD_DRV_LOG(INFO, "Unknown element type");
1955 rte_free(switch_config);
1961 i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
1964 struct pool_entry *entry;
1966 if (pool == NULL || num == 0)
1969 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
1970 if (entry == NULL) {
1971 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
1975 /* Initialize the queue heap */
1976 pool->num_free = num;
1977 pool->num_alloc = 0;
1979 LIST_INIT(&pool->alloc_list);
1980 LIST_INIT(&pool->free_list);
1982 /* Initialize element */
1986 LIST_INSERT_HEAD(&pool->free_list, entry, next);
1991 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
1993 struct pool_entry *entry;
1998 LIST_FOREACH(entry, &pool->alloc_list, next) {
1999 LIST_REMOVE(entry, next);
2003 LIST_FOREACH(entry, &pool->free_list, next) {
2004 LIST_REMOVE(entry, next);
2009 pool->num_alloc = 0;
2011 LIST_INIT(&pool->alloc_list);
2012 LIST_INIT(&pool->free_list);
2016 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2019 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2020 uint32_t pool_offset;
2024 PMD_DRV_LOG(ERR, "Invalid parameter");
2028 pool_offset = base - pool->base;
2029 /* Lookup in alloc list */
2030 LIST_FOREACH(entry, &pool->alloc_list, next) {
2031 if (entry->base == pool_offset) {
2032 valid_entry = entry;
2033 LIST_REMOVE(entry, next);
2038 /* Not found, return */
2039 if (valid_entry == NULL) {
2040 PMD_DRV_LOG(ERR, "Failed to find entry");
2045 * Found it, move it to free list and try to merge.
2046 * To make merging easier, the free list is always kept sorted by queue base.
2047 * Find the adjacent prev and next entries.
2050 LIST_FOREACH(entry, &pool->free_list, next) {
2051 if (entry->base > valid_entry->base) {
2059 /* Try to merge with the next one */
2061 /* Merge with next one */
2062 if (valid_entry->base + valid_entry->len == next->base) {
2063 next->base = valid_entry->base;
2064 next->len += valid_entry->len;
2065 rte_free(valid_entry);
2072 /* Merge with previous one */
2073 if (prev->base + prev->len == valid_entry->base) {
2074 prev->len += valid_entry->len;
2075 /* If it merged with the next one, remove the next node */
2077 LIST_REMOVE(valid_entry, next);
2078 rte_free(valid_entry);
2080 rte_free(valid_entry);
2086 /* No entry found to merge with, insert */
2089 LIST_INSERT_AFTER(prev, valid_entry, next);
2090 else if (next != NULL)
2091 LIST_INSERT_BEFORE(next, valid_entry, next);
2092 else /* It's empty list, insert to head */
2093 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2096 pool->num_free += valid_entry->len;
2097 pool->num_alloc -= valid_entry->len;
2103 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2106 struct pool_entry *entry, *valid_entry;
2108 if (pool == NULL || num == 0) {
2109 PMD_DRV_LOG(ERR, "Invalid parameter");
2113 if (pool->num_free < num) {
2114 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2115 num, pool->num_free);
2120 /* Look up the free list and find the best-fit entry */
2121 LIST_FOREACH(entry, &pool->free_list, next) {
2122 if (entry->len >= num) {
2124 if (entry->len == num) {
2125 valid_entry = entry;
2128 if (valid_entry == NULL || valid_entry->len > entry->len)
2129 valid_entry = entry;
2133 /* No entry found to satisfy the request, return */
2134 if (valid_entry == NULL) {
2135 PMD_DRV_LOG(ERR, "No valid entry found");
2139 * The entry has exactly the requested number of queues;
2140 * remove it from the free list (it is inserted into the alloc list below).
2142 if (valid_entry->len == num) {
2143 LIST_REMOVE(valid_entry, next);
2146 * The entry has more queues than requested;
2147 * create a new entry for the alloc list and subtract the
2148 * allocated queues from the base and length of the free list entry.
2150 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2151 if (entry == NULL) {
2152 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2156 entry->base = valid_entry->base;
2158 valid_entry->base += num;
2159 valid_entry->len -= num;
2160 valid_entry = entry;
2163 /* Insert it into alloc list, not sorted */
2164 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2166 pool->num_free -= valid_entry->len;
2167 pool->num_alloc += valid_entry->len;
2169 return (valid_entry->base + pool->base);
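/*
 * Illustrative usage sketch (not part of the original source): allocating a
 * block of queue pairs from the PF queue pool and returning it later, assuming
 * the pool has been initialized with i40e_res_pool_init().
 *
 *     int base = i40e_res_pool_alloc(&pf->qp_pool, 4);
 *     if (base < 0)
 *             return base;            (allocation failed)
 *     ...
 *     i40e_res_pool_free(&pf->qp_pool, base);
 */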
2173 * bitmap_is_subset - Check whether src2 is subset of src1
2176 bitmap_is_subset(uint8_t src1, uint8_t src2)
2178 return !((src1 ^ src2) & src2);
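/*
 * Worked example for the subset check above: with src1 = 0x0F and src2 = 0x05,
 * (src1 ^ src2) & src2 == 0, so the function returns 1 (src2 is a subset of
 * src1). With src2 = 0x10 the expression is non-zero and the result is 0.
 */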
2182 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2184 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2186 /* If DCB is not supported, only default TC is supported */
2187 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2188 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2192 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2193 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2194 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2198 return I40E_SUCCESS;
2202 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2203 struct i40e_vsi_vlan_pvid_info *info)
2206 struct i40e_vsi_context ctxt;
2207 uint8_t vlan_flags = 0;
2210 if (vsi == NULL || info == NULL) {
2211 PMD_DRV_LOG(ERR, "invalid parameters");
2212 return I40E_ERR_PARAM;
2216 vsi->info.pvid = info->config.pvid;
2218 * If PVID insertion is enabled, only tagged packets are
2219 * allowed to be sent out.
2221 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2222 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2225 if (info->config.reject.tagged == 0)
2226 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2228 if (info->config.reject.untagged == 0)
2229 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2231 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2232 I40E_AQ_VSI_PVLAN_MODE_MASK);
2233 vsi->info.port_vlan_flags |= vlan_flags;
2234 vsi->info.valid_sections =
2235 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2236 memset(&ctxt, 0, sizeof(ctxt));
2237 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2238 ctxt.seid = vsi->seid;
2240 hw = I40E_VSI_TO_HW(vsi);
2241 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2242 if (ret != I40E_SUCCESS)
2243 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2249 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2251 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2253 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2255 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2256 if (ret != I40E_SUCCESS)
2260 PMD_DRV_LOG(ERR, "seid not valid");
2264 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2265 tc_bw_data.tc_valid_bits = enabled_tcmap;
2266 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2267 tc_bw_data.tc_bw_credits[i] =
2268 (enabled_tcmap & (1 << i)) ? 1 : 0;
2270 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2271 if (ret != I40E_SUCCESS) {
2272 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2276 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2277 sizeof(vsi->info.qs_handle));
2278 return I40E_SUCCESS;
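/*
 * Worked example for the TC bandwidth setup above, assuming I40E_DEFAULT_TCMAP
 * is 0x1 (TC0 only): tc_valid_bits becomes 0x1 and tc_bw_credits ends up as
 * {1, 0, 0, 0, 0, 0, 0, 0}, i.e. a single credit for TC0 and none for the
 * disabled traffic classes.
 */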
2282 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2283 struct i40e_aqc_vsi_properties_data *info,
2284 uint8_t enabled_tcmap)
2286 int ret, total_tc = 0, i;
2287 uint16_t qpnum_per_tc, bsf, qp_idx;
2289 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2290 if (ret != I40E_SUCCESS)
2293 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2294 if (enabled_tcmap & (1 << i))
2296 vsi->enabled_tc = enabled_tcmap;
2298 /* Number of queues per enabled TC */
2299 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2300 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2301 bsf = rte_bsf32(qpnum_per_tc);
2303 /* Adjust the queue number to actual queues that can be applied */
2304 vsi->nb_qps = qpnum_per_tc * total_tc;
2307 * Configure TC and queue mapping parameters. For each enabled TC,
2308 * allocate qpnum_per_tc queues to that traffic class. Disabled TCs
2309 * are served by the default queue.
2312 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2313 if (vsi->enabled_tc & (1 << i)) {
2314 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2315 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2316 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2317 qp_idx += qpnum_per_tc;
2319 info->tc_mapping[i] = 0;
2322 /* Associate queue number with VSI */
2323 if (vsi->type == I40E_VSI_SRIOV) {
2324 info->mapping_flags |=
2325 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2326 for (i = 0; i < vsi->nb_qps; i++)
2327 info->queue_mapping[i] =
2328 rte_cpu_to_le_16(vsi->base_queue + i);
2330 info->mapping_flags |=
2331 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2332 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2334 info->valid_sections =
2335 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2337 return I40E_SUCCESS;
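/*
 * Worked example for the TC/queue mapping above, assuming vsi->nb_qps = 16,
 * enabled_tcmap = 0x1 (TC0 only) and no cap from I40E_MAX_Q_PER_TC:
 *
 *     total_tc      = 1
 *     qpnum_per_tc  = i40e_prev_power_of_2(16 / 1) = 16
 *     bsf           = rte_bsf32(16) = 4
 *     tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                     (4 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *
 * so TC0 starts at queue offset 0 and owns 2^4 = 16 queues.
 */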
2341 i40e_veb_release(struct i40e_veb *veb)
2343 struct i40e_vsi *vsi;
2346 if (veb == NULL || veb->associate_vsi == NULL)
2349 if (!TAILQ_EMPTY(&veb->head)) {
2350 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2354 vsi = veb->associate_vsi;
2355 hw = I40E_VSI_TO_HW(vsi);
2357 vsi->uplink_seid = veb->uplink_seid;
2358 i40e_aq_delete_element(hw, veb->seid, NULL);
2361 return I40E_SUCCESS;
2365 static struct i40e_veb *
2366 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2368 struct i40e_veb *veb;
2372 if (NULL == pf || vsi == NULL) {
2373 PMD_DRV_LOG(ERR, "veb setup failed, "
2374 "associated VSI shouldn't null");
2377 hw = I40E_PF_TO_HW(pf);
2379 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2381 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2385 veb->associate_vsi = vsi;
2386 TAILQ_INIT(&veb->head);
2387 veb->uplink_seid = vsi->uplink_seid;
2389 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2390 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2392 if (ret != I40E_SUCCESS) {
2393 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2394 hw->aq.asq_last_status);
2398 /* get statistics index */
2399 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2400 &veb->stats_idx, NULL, NULL, NULL);
2401 if (ret != I40E_SUCCESS) {
2402 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2403 hw->aq.asq_last_status);
2407 /* Get VEB bandwidth, to be implemented */
2408 /* The associated vsi now binds to the VEB; set its uplink to this VEB */
2409 vsi->uplink_seid = veb->seid;
2418 i40e_vsi_release(struct i40e_vsi *vsi)
2422 struct i40e_vsi_list *vsi_list;
2424 struct i40e_mac_filter *f;
2427 return I40E_SUCCESS;
2429 pf = I40E_VSI_TO_PF(vsi);
2430 hw = I40E_VSI_TO_HW(vsi);
2432 /* VSI has children attached; release the children first */
2434 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2435 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2437 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2439 i40e_veb_release(vsi->veb);
2442 /* Remove all macvlan filters of the VSI */
2443 i40e_vsi_remove_all_macvlan_filter(vsi);
2444 TAILQ_FOREACH(f, &vsi->mac_list, next)
2447 if (vsi->type != I40E_VSI_MAIN) {
2448 /* Remove vsi from parent's sibling list */
2449 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2450 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2451 return I40E_ERR_PARAM;
2453 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2454 &vsi->sib_vsi_list, list);
2456 /* Remove all switch elements of the VSI */
2457 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2458 if (ret != I40E_SUCCESS)
2459 PMD_DRV_LOG(ERR, "Failed to delete element");
2461 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2463 if (vsi->type != I40E_VSI_SRIOV)
2464 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2467 return I40E_SUCCESS;
2471 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2473 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2474 struct i40e_aqc_remove_macvlan_element_data def_filter;
2477 if (vsi->type != I40E_VSI_MAIN)
2478 return I40E_ERR_CONFIG;
2479 memset(&def_filter, 0, sizeof(def_filter));
2480 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2482 def_filter.vlan_tag = 0;
2483 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2484 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2485 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2486 if (ret != I40E_SUCCESS) {
2487 struct i40e_mac_filter *f;
2489 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2491 /* The permanent MAC needs to be added to the MAC list */
2492 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2494 PMD_DRV_LOG(ERR, "failed to allocate memory");
2495 return I40E_ERR_NO_MEMORY;
2497 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2499 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2505 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2509 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2511 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2512 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2513 struct i40e_hw *hw = &vsi->adapter->hw;
2517 memset(&bw_config, 0, sizeof(bw_config));
2518 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2519 if (ret != I40E_SUCCESS) {
2520 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2521 hw->aq.asq_last_status);
2525 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2526 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2527 &ets_sla_config, NULL);
2528 if (ret != I40E_SUCCESS) {
2529 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2530 "configuration %u", hw->aq.asq_last_status);
2534 /* Do not store the info yet, just print it out */
2535 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2536 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2537 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2538 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2539 ets_sla_config.share_credits[i]);
2540 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2541 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2542 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2543 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2552 i40e_vsi_setup(struct i40e_pf *pf,
2553 enum i40e_vsi_type type,
2554 struct i40e_vsi *uplink_vsi,
2555 uint16_t user_param)
2557 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2558 struct i40e_vsi *vsi;
2560 struct i40e_vsi_context ctxt;
2561 struct ether_addr broadcast =
2562 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2564 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2565 PMD_DRV_LOG(ERR, "VSI setup failed, "
2566 "VSI link shouldn't be NULL");
2570 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2571 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2572 "uplink VSI should be NULL");
2576 /* If the uplink vsi hasn't set up a VEB, create one first */
2577 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2578 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2580 if (NULL == uplink_vsi->veb) {
2581 PMD_DRV_LOG(ERR, "VEB setup failed");
2586 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2588 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2591 TAILQ_INIT(&vsi->mac_list);
2593 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2594 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2595 vsi->parent_vsi = uplink_vsi;
2596 vsi->user_param = user_param;
2597 /* Allocate queues */
2598 switch (vsi->type) {
2599 case I40E_VSI_MAIN:
2600 vsi->nb_qps = pf->lan_nb_qps;
2602 case I40E_VSI_SRIOV:
2603 vsi->nb_qps = pf->vf_nb_qps;
2608 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2610 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2614 vsi->base_queue = ret;
2616 /* VF has MSIX interrupt in VF range, don't allocate here */
2617 if (type != I40E_VSI_SRIOV) {
2618 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2620 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2621 goto fail_queue_alloc;
2623 vsi->msix_intr = ret;
2627 if (type == I40E_VSI_MAIN) {
2628 /* For main VSI, no need to add since it's default one */
2629 vsi->uplink_seid = pf->mac_seid;
2630 vsi->seid = pf->main_vsi_seid;
2631 /* Bind queues with specific MSIX interrupt */
2633 * Needs at least 2 interrupts: one for the misc cause, which will be
2634 * enabled from the OS side, and another for the queues, bound to the
2635 * interrupt from the device side only.
2638 /* Get default VSI parameters from hardware */
2639 memset(&ctxt, 0, sizeof(ctxt));
2640 ctxt.seid = vsi->seid;
2641 ctxt.pf_num = hw->pf_id;
2642 ctxt.uplink_seid = vsi->uplink_seid;
2644 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2645 if (ret != I40E_SUCCESS) {
2646 PMD_DRV_LOG(ERR, "Failed to get VSI params");
2647 goto fail_msix_alloc;
2649 (void)rte_memcpy(&vsi->info, &ctxt.info,
2650 sizeof(struct i40e_aqc_vsi_properties_data));
2651 vsi->vsi_id = ctxt.vsi_number;
2652 vsi->info.valid_sections = 0;
2654 /* Configure TC; only TC0 is enabled */
2655 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2657 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
2658 goto fail_msix_alloc;
2661 /* TC, queue mapping */
2662 memset(&ctxt, 0, sizeof(ctxt));
2663 vsi->info.valid_sections |=
2664 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2665 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2666 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2667 (void)rte_memcpy(&ctxt.info, &vsi->info,
2668 sizeof(struct i40e_aqc_vsi_properties_data));
2669 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2670 I40E_DEFAULT_TCMAP);
2671 if (ret != I40E_SUCCESS) {
2672 PMD_DRV_LOG(ERR, "Failed to configure "
2673 "TC queue mapping");
2674 goto fail_msix_alloc;
2676 ctxt.seid = vsi->seid;
2677 ctxt.pf_num = hw->pf_id;
2678 ctxt.uplink_seid = vsi->uplink_seid;
2681 /* Update VSI parameters */
2682 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2683 if (ret != I40E_SUCCESS) {
2684 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2685 goto fail_msix_alloc;
2688 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2689 sizeof(vsi->info.tc_mapping));
2690 (void)rte_memcpy(&vsi->info.queue_mapping,
2691 &ctxt.info.queue_mapping,
2692 sizeof(vsi->info.queue_mapping));
2693 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2694 vsi->info.valid_sections = 0;
2696 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2700 * Updating the default filter settings is necessary to prevent
2701 * reception of tagged packets.
2702 * Some old firmware configurations load a default macvlan
2703 * filter which accepts both tagged and untagged packets.
2704 * The update replaces it with a normal filter if needed.
2705 * For NVM 4.2.2 or later, the update is not needed anymore;
2706 * firmware with correct configurations loads the expected
2707 * default macvlan filter, which cannot be removed.
2709 i40e_update_default_filter_setting(vsi);
2710 } else if (type == I40E_VSI_SRIOV) {
2711 memset(&ctxt, 0, sizeof(ctxt));
2713 * For other VSIs, the uplink_seid equals the uplink VSI's
2714 * uplink_seid since they share the same VEB
2716 vsi->uplink_seid = uplink_vsi->uplink_seid;
2717 ctxt.pf_num = hw->pf_id;
2718 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2719 ctxt.uplink_seid = vsi->uplink_seid;
2720 ctxt.connection_type = 0x1;
2721 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2723 /* Configure switch ID */
2724 ctxt.info.valid_sections |=
2725 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2726 ctxt.info.switch_id =
2727 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2728 /* Configure port/vlan */
2729 ctxt.info.valid_sections |=
2730 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2731 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2732 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2733 I40E_DEFAULT_TCMAP);
2734 if (ret != I40E_SUCCESS) {
2735 PMD_DRV_LOG(ERR, "Failed to configure "
2736 "TC queue mapping");
2737 goto fail_msix_alloc;
2739 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2740 ctxt.info.valid_sections |=
2741 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2743 * Since the VSI is not created yet, only configure the parameters;
2744 * the VSI is added below.
2748 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
2749 goto fail_msix_alloc;
2752 if (vsi->type != I40E_VSI_MAIN) {
2753 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2755 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
2756 hw->aq.asq_last_status);
2757 goto fail_msix_alloc;
2759 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2760 vsi->info.valid_sections = 0;
2761 vsi->seid = ctxt.seid;
2762 vsi->vsi_id = ctxt.vsi_number;
2763 vsi->sib_vsi_list.vsi = vsi;
2764 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2765 &vsi->sib_vsi_list, list);
2768 /* MAC/VLAN configuration */
2769 ret = i40e_vsi_add_mac(vsi, &broadcast);
2770 if (ret != I40E_SUCCESS) {
2771 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
2772 goto fail_msix_alloc;
2775 /* Get VSI BW information */
2776 i40e_vsi_dump_bw_config(vsi);
2779 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2781 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2787 /* Configure vlan stripping on or off */
2789 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2791 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2792 struct i40e_vsi_context ctxt;
2794 int ret = I40E_SUCCESS;
2796 /* Check if it is already on or off */
2797 if (vsi->info.valid_sections &
2798 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2800 if ((vsi->info.port_vlan_flags &
2801 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2802 return 0; /* already on */
2804 if ((vsi->info.port_vlan_flags &
2805 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2806 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2807 return 0; /* already off */
2812 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2814 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2815 vsi->info.valid_sections =
2816 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2817 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2818 vsi->info.port_vlan_flags |= vlan_flags;
2819 ctxt.seid = vsi->seid;
2820 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2821 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2823 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2824 on ? "enable" : "disable");
2830 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2832 struct rte_eth_dev_data *data = dev->data;
2835 /* Apply vlan offload setting */
2836 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2838 /* Apply double-vlan setting, not implemented yet */
2840 /* Apply pvid setting */
2841 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2842 data->dev_conf.txmode.hw_vlan_insert_pvid);
2844 PMD_DRV_LOG(INFO, "Failed to update VSI params");
2850 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2852 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2854 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2858 i40e_update_flow_control(struct i40e_hw *hw)
2860 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2861 struct i40e_link_status link_status;
2862 uint32_t rxfc = 0, txfc = 0, reg;
2866 memset(&link_status, 0, sizeof(link_status));
2867 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2868 if (ret != I40E_SUCCESS) {
2869 PMD_DRV_LOG(ERR, "Failed to get link status information");
2870 goto write_reg; /* Disable flow control */
2873 an_info = hw->phy.link_info.an_info;
2874 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2875 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
2876 ret = I40E_ERR_NOT_READY;
2877 goto write_reg; /* Disable flow control */
2880 * If link auto negotiation is enabled, flow control needs to
2881 * be configured according to it
2883 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2884 case I40E_LINK_PAUSE_RXTX:
2887 hw->fc.current_mode = I40E_FC_FULL;
2889 case I40E_AQ_LINK_PAUSE_RX:
2891 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2893 case I40E_AQ_LINK_PAUSE_TX:
2895 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2898 hw->fc.current_mode = I40E_FC_NONE;
2903 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2904 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2905 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2906 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2907 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2908 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2915 i40e_pf_setup(struct i40e_pf *pf)
2917 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2918 struct i40e_filter_control_settings settings;
2919 struct rte_eth_dev_data *dev_data = pf->dev_data;
2920 struct i40e_vsi *vsi;
2923 /* Clear all stats counters */
2924 pf->offset_loaded = FALSE;
2925 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2926 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2928 ret = i40e_pf_get_switch_config(pf);
2929 if (ret != I40E_SUCCESS) {
2930 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2935 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2937 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2938 return I40E_ERR_NOT_READY;
2941 dev_data->nb_rx_queues = vsi->nb_qps;
2942 dev_data->nb_tx_queues = vsi->nb_qps;
2944 /* Configure filter control */
2945 memset(&settings, 0, sizeof(settings));
2946 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2947 /* Enable ethtype and macvlan filters */
2948 settings.enable_ethtype = TRUE;
2949 settings.enable_macvlan = TRUE;
2950 ret = i40e_set_filter_control(hw, &settings);
2952 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2955 /* Update flow control according to the auto negotiation */
2956 i40e_update_flow_control(hw);
2958 return I40E_SUCCESS;
2962 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2968 * Set or clear the TX Queue Disable flags,
2969 * as required by hardware.
2971 i40e_pre_tx_queue_cfg(hw, q_idx, on);
2972 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
2974 /* Wait until the request is finished */
2975 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2976 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2977 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2978 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2979 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2985 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2986 return I40E_SUCCESS; /* already on, skip next steps */
2988 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
2989 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2991 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2992 return I40E_SUCCESS; /* already off, skip next steps */
2993 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2995 /* Write the register */
2996 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2997 /* Check the result */
2998 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2999 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3000 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3002 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3003 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3006 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3007 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3011 /* Check if it timed out */
3012 if (j >= I40E_CHK_Q_ENA_COUNT) {
3013 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3014 (on ? "enable" : "disable"), q_idx);
3015 return I40E_ERR_TIMEOUT;
3018 return I40E_SUCCESS;
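/*
 * Illustrative usage sketch (not part of the original source): the function
 * above writes QENA_REQ and polls until QENA_STAT follows it. Enabling a
 * single TX queue from queue-start code could look like the following, where
 * tx_queue_id is a caller-supplied queue index:
 *
 *     ret = i40e_switch_tx_queue(hw, tx_queue_id, TRUE);
 *     if (ret != I40E_SUCCESS)
 *             PMD_DRV_LOG(ERR, "Failed to switch on TX queue %u",
 *                         tx_queue_id);
 */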
3021 /* Switch on or off the tx queues */
3023 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3025 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3026 struct i40e_tx_queue *txq;
3027 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3031 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3032 txq = dev_data->tx_queues[i];
3033 /* Don't operate the queue if it is not configured or
3034 * if it is to be started per queue only (deferred start) */
3035 if (!txq->q_set || (on && txq->tx_deferred_start))
3038 ret = i40e_dev_tx_queue_start(dev, i);
3040 ret = i40e_dev_tx_queue_stop(dev, i);
3041 if (ret != I40E_SUCCESS)
3045 return I40E_SUCCESS;
3049 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3054 /* Wait until the request is finished */
3055 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3056 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3057 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3058 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3059 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3064 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3065 return I40E_SUCCESS; /* Already on, skip next steps */
3066 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3068 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3069 return I40E_SUCCESS; /* Already off, skip next steps */
3070 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3073 /* Write the register */
3074 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3075 /* Check the result */
3076 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3077 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3078 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3080 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3081 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3084 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3085 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3090 /* Check if it timed out */
3091 if (j >= I40E_CHK_Q_ENA_COUNT) {
3092 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3093 (on ? "enable" : "disable"), q_idx);
3094 return I40E_ERR_TIMEOUT;
3097 return I40E_SUCCESS;
3099 /* Switch on or off the rx queues */
3101 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3103 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3104 struct i40e_rx_queue *rxq;
3105 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3109 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3110 rxq = dev_data->rx_queues[i];
3111 /* Don't operate the queue if it is not configured or
3112 * if it is to be started per queue only (deferred start) */
3113 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3116 ret = i40e_dev_rx_queue_start(dev, i);
3118 ret = i40e_dev_rx_queue_stop(dev, i);
3119 if (ret != I40E_SUCCESS)
3123 return I40E_SUCCESS;
3126 /* Switch on or off all the rx/tx queues */
3128 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3133 /* enable rx queues before enabling tx queues */
3134 ret = i40e_vsi_switch_rx_queues(vsi, on);
3136 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3139 ret = i40e_vsi_switch_tx_queues(vsi, on);
3141 /* Stop tx queues before stopping rx queues */
3142 ret = i40e_vsi_switch_tx_queues(vsi, on);
3144 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3147 ret = i40e_vsi_switch_rx_queues(vsi, on);
3153 /* Initialize VSI for TX */
3155 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3157 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3158 struct rte_eth_dev_data *data = pf->dev_data;
3160 uint32_t ret = I40E_SUCCESS;
3162 for (i = 0; i < data->nb_tx_queues; i++) {
3163 ret = i40e_tx_queue_init(data->tx_queues[i]);
3164 if (ret != I40E_SUCCESS)
3171 /* Initialize VSI for RX */
3173 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3175 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3176 struct rte_eth_dev_data *data = pf->dev_data;
3177 int ret = I40E_SUCCESS;
3180 i40e_pf_config_mq_rx(pf);
3181 for (i = 0; i < data->nb_rx_queues; i++) {
3182 ret = i40e_rx_queue_init(data->rx_queues[i]);
3183 if (ret != I40E_SUCCESS) {
3184 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3193 /* Initialize VSI */
3195 i40e_vsi_init(struct i40e_vsi *vsi)
3199 err = i40e_vsi_tx_init(vsi);
3201 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3204 err = i40e_vsi_rx_init(vsi);
3206 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3214 i40e_stat_update_32(struct i40e_hw *hw,
3222 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3226 if (new_data >= *offset)
3227 *stat = (uint64_t)(new_data - *offset);
3229 *stat = (uint64_t)((new_data +
3230 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3234 i40e_stat_update_48(struct i40e_hw *hw,
3243 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3244 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3245 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3250 if (new_data >= *offset)
3251 *stat = new_data - *offset;
3253 *stat = (uint64_t)((new_data +
3254 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3256 *stat &= I40E_48_BIT_MASK;
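/*
 * Worked example for the 48-bit counter handling above: if the saved offset is
 * 0xFFFFFFFFFFF0 and the register pair now reads 0x10, the counter has
 * wrapped, so the statistic is computed as
 *
 *     (0x10 + (1ULL << I40E_48_BIT_SHIFT)) - 0xFFFFFFFFFFF0 = 0x20
 *
 * and then masked with I40E_48_BIT_MASK, i.e. 0x20 units since the offset was
 * taken.
 */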
3261 i40e_pf_disable_irq0(struct i40e_hw *hw)
3263 /* Disable all interrupt types */
3264 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3265 I40E_WRITE_FLUSH(hw);
3270 i40e_pf_enable_irq0(struct i40e_hw *hw)
3272 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3273 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3274 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3275 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3276 I40E_WRITE_FLUSH(hw);
3280 i40e_pf_config_irq0(struct i40e_hw *hw)
3284 /* read pending request and disable first */
3285 i40e_pf_disable_irq0(hw);
3287 * Enable all interrupt error options to detect possible errors;
3288 * other informative interrupts are ignored
3290 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3291 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3292 I40E_PFINT_ICR0_ENA_GRST_MASK |
3293 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3294 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3295 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3296 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3297 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3299 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3300 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3301 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3303 /* Do not link any queues with irq0 */
3304 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3305 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3309 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3311 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3312 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3315 uint32_t index, offset, val;
3320 * Try to find which VF triggered a reset; use the absolute VF id for access
3321 * since the register is a global register.
3323 for (i = 0; i < pf->vf_num; i++) {
3324 abs_vf_id = hw->func_caps.vf_base_id + i;
3325 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3326 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3327 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3328 /* VFR event occurred */
3329 if (val & (0x1 << offset)) {
3332 /* Clear the event first */
3333 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3335 PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3337 * Only notify that a VF reset event occurred;
3338 * don't trigger another SW reset
3340 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3341 if (ret != I40E_SUCCESS)
3342 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3348 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3350 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3351 struct i40e_arq_event_info info;
3352 uint16_t pending, opcode;
3355 info.buf_len = I40E_AQ_BUF_SZ;
3356 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3357 if (!info.msg_buf) {
3358 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3364 ret = i40e_clean_arq_element(hw, &info, &pending);
3366 if (ret != I40E_SUCCESS) {
3367 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3368 "aq_err: %u", hw->aq.asq_last_status);
3371 opcode = rte_le_to_cpu_16(info.desc.opcode);
3374 case i40e_aqc_opc_send_msg_to_pf:
3375 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3376 i40e_pf_host_handle_vf_msg(dev,
3377 rte_le_to_cpu_16(info.desc.retval),
3378 rte_le_to_cpu_32(info.desc.cookie_high),
3379 rte_le_to_cpu_32(info.desc.cookie_low),
3384 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3389 rte_free(info.msg_buf);
3393 * Interrupt handler triggered by NIC for handling
3394 * specific interrupt.
3397 * Pointer to interrupt handle.
3399 * The address of parameter (struct rte_eth_dev *) registered before.
3405 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3408 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3409 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3410 uint32_t cause, enable;
3412 i40e_pf_disable_irq0(hw);
3414 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3415 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3417 /* Shared IRQ case, return */
3418 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3419 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3420 "no INT event to process", hw->pf_id);
3424 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3425 PMD_DRV_LOG(INFO, "INT:Link status changed");
3426 i40e_dev_link_update(dev, 0);
3429 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3430 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");
3432 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3433 PMD_DRV_LOG(INFO, "INT:Malicious programming detected");
3435 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3436 PMD_DRV_LOG(INFO, "INT:Global Resets Requested");
3438 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3439 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured");
3441 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3442 PMD_DRV_LOG(INFO, "INT:HMC error occured");
3444 /* Add processing function to deal with the VF reset event */
3445 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3446 PMD_DRV_LOG(INFO, "INT:VF reset detected");
3447 i40e_dev_handle_vfr_event(dev);
3449 /* Find admin queue event */
3450 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3451 PMD_DRV_LOG(INFO, "INT:ADMINQ event");
3452 i40e_dev_handle_aq_msg(dev);
3456 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3457 /* Re-enable interrupt from device side */
3458 i40e_pf_enable_irq0(hw);
3459 /* Re-enable interrupt from host side */
3460 rte_intr_enable(&(dev->pci_dev->intr_handle));
3464 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3465 struct i40e_macvlan_filter *filter,
3468 int ele_num, ele_buff_size;
3469 int num, actual_num, i;
3470 int ret = I40E_SUCCESS;
3471 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3472 struct i40e_aqc_add_macvlan_element_data *req_list;
3474 if (filter == NULL || total == 0)
3475 return I40E_ERR_PARAM;
3476 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3477 ele_buff_size = hw->aq.asq_buf_size;
3479 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3480 if (req_list == NULL) {
3481 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3482 return I40E_ERR_NO_MEMORY;
3487 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3488 memset(req_list, 0, ele_buff_size);
3490 for (i = 0; i < actual_num; i++) {
3491 (void)rte_memcpy(req_list[i].mac_addr,
3492 &filter[num + i].macaddr, ETH_ADDR_LEN);
3493 req_list[i].vlan_tag =
3494 rte_cpu_to_le_16(filter[num + i].vlan_id);
3495 req_list[i].flags = rte_cpu_to_le_16(\
3496 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3497 req_list[i].queue_number = 0;
3500 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3502 if (ret != I40E_SUCCESS) {
3503 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3507 } while (num < total);
3515 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3516 struct i40e_macvlan_filter *filter,
3519 int ele_num, ele_buff_size;
3520 int num, actual_num, i;
3521 int ret = I40E_SUCCESS;
3522 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3523 struct i40e_aqc_remove_macvlan_element_data *req_list;
3525 if (filter == NULL || total == 0)
3526 return I40E_ERR_PARAM;
3528 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3529 ele_buff_size = hw->aq.asq_buf_size;
3531 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3532 if (req_list == NULL) {
3533 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3534 return I40E_ERR_NO_MEMORY;
3539 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3540 memset(req_list, 0, ele_buff_size);
3542 for (i = 0; i < actual_num; i++) {
3543 (void)rte_memcpy(req_list[i].mac_addr,
3544 &filter[num + i].macaddr, ETH_ADDR_LEN);
3545 req_list[i].vlan_tag =
3546 rte_cpu_to_le_16(filter[num + i].vlan_id);
3547 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3550 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3552 if (ret != I40E_SUCCESS) {
3553 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
3557 } while (num < total);
3564 /* Find a specific MAC filter */
3565 static struct i40e_mac_filter *
3566 i40e_find_mac_filter(struct i40e_vsi *vsi,
3567 struct ether_addr *macaddr)
3569 struct i40e_mac_filter *f;
3571 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3572 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3580 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3583 uint32_t vid_idx, vid_bit;
3585 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3586 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3588 if (vsi->vfta[vid_idx] & vid_bit)
3595 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3596 uint16_t vlan_id, bool on)
3598 uint32_t vid_idx, vid_bit;
3600 #define UINT32_BIT_MASK 0x1F
3601 #define VALID_VLAN_BIT_MASK 0xFFF
3602 /* VFTA is an array of 32-bit elements, each containing 32 vlan bits. Find the
3603 * element first, then find the bit within it
3605 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3607 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3610 vsi->vfta[vid_idx] |= vid_bit;
3612 vsi->vfta[vid_idx] &= ~vid_bit;
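/*
 * Worked example for the VFTA indexing used above and in
 * i40e_find_vlan_filter(): for vlan_id = 100,
 *
 *     vid_idx = (100 >> 5) & 0x7F = 3
 *     vid_bit = 1 << (100 & 0x1F) = 1 << 4
 *
 * so vlan 100 is tracked by bit 4 of vsi->vfta[3].
 */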
3616 * Find all vlan options for a specific mac addr
3617 * and fill mv_f with the vlans actually found.
3620 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3621 struct i40e_macvlan_filter *mv_f,
3622 int num, struct ether_addr *addr)
3628 * i40e_find_vlan_filter is deliberately not used here to reduce the loop time,
3629 * although the code looks more complex.
3631 if (num < vsi->vlan_num)
3632 return I40E_ERR_PARAM;
3635 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3637 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3638 if (vsi->vfta[j] & (1 << k)) {
3640 PMD_DRV_LOG(ERR, "vlan number "
3642 return I40E_ERR_PARAM;
3644 (void)rte_memcpy(&mv_f[i].macaddr,
3645 addr, ETH_ADDR_LEN);
3647 j * I40E_UINT32_BIT_SIZE + k;
3653 return I40E_SUCCESS;
3657 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3658 struct i40e_macvlan_filter *mv_f,
3663 struct i40e_mac_filter *f;
3665 if (num < vsi->mac_num)
3666 return I40E_ERR_PARAM;
3668 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3670 PMD_DRV_LOG(ERR, "buffer number not match");
3671 return I40E_ERR_PARAM;
3673 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3674 mv_f[i].vlan_id = vlan;
3678 return I40E_SUCCESS;
3682 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3685 struct i40e_mac_filter *f;
3686 struct i40e_macvlan_filter *mv_f;
3687 int ret = I40E_SUCCESS;
3689 if (vsi == NULL || vsi->mac_num == 0)
3690 return I40E_ERR_PARAM;
3692 /* Case where no vlan is set */
3693 if (vsi->vlan_num == 0)
3696 num = vsi->mac_num * vsi->vlan_num;
3698 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3700 PMD_DRV_LOG(ERR, "failed to allocate memory");
3701 return I40E_ERR_NO_MEMORY;
3705 if (vsi->vlan_num == 0) {
3706 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3707 (void)rte_memcpy(&mv_f[i].macaddr,
3708 &f->macaddr, ETH_ADDR_LEN);
3709 mv_f[i].vlan_id = 0;
3713 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3714 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3715 vsi->vlan_num, &f->macaddr);
3716 if (ret != I40E_SUCCESS)
3722 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3730 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3732 struct i40e_macvlan_filter *mv_f;
3734 int ret = I40E_SUCCESS;
3736 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3737 return I40E_ERR_PARAM;
3739 /* If it's already set, just return */
3740 if (i40e_find_vlan_filter(vsi,vlan))
3741 return I40E_SUCCESS;
3743 mac_num = vsi->mac_num;
3746 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3747 return I40E_ERR_PARAM;
3750 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3753 PMD_DRV_LOG(ERR, "failed to allocate memory");
3754 return I40E_ERR_NO_MEMORY;
3757 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3759 if (ret != I40E_SUCCESS)
3762 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3764 if (ret != I40E_SUCCESS)
3767 i40e_set_vlan_filter(vsi, vlan, 1);
3777 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3779 struct i40e_macvlan_filter *mv_f;
3781 int ret = I40E_SUCCESS;
3784 * Vlan 0 is the generic filter for untagged packets
3785 * and can't be removed.
3787 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3788 return I40E_ERR_PARAM;
3790 /* If can't find it, just return */
3791 if (!i40e_find_vlan_filter(vsi, vlan))
3792 return I40E_ERR_PARAM;
3794 mac_num = vsi->mac_num;
3797 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3798 return I40E_ERR_PARAM;
3801 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3804 PMD_DRV_LOG(ERR, "failed to allocate memory");
3805 return I40E_ERR_NO_MEMORY;
3808 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3810 if (ret != I40E_SUCCESS)
3813 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3815 if (ret != I40E_SUCCESS)
3818 /* This is the last vlan to remove; replace all mac filters with vlan 0 */
3819 if (vsi->vlan_num == 1) {
3820 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3821 if (ret != I40E_SUCCESS)
3824 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3825 if (ret != I40E_SUCCESS)
3829 i40e_set_vlan_filter(vsi, vlan, 0);
3839 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3841 struct i40e_mac_filter *f;
3842 struct i40e_macvlan_filter *mv_f;
3844 int ret = I40E_SUCCESS;
3846 /* If the MAC filter has already been added and configured, return */
3847 f = i40e_find_mac_filter(vsi, addr);
3849 return I40E_SUCCESS;
3852 * If vlan_num is 0, this is the first time a mac is added;
3853 * set the mask for vlan_id 0.
3855 if (vsi->vlan_num == 0) {
3856 i40e_set_vlan_filter(vsi, 0, 1);
3860 vlan_num = vsi->vlan_num;
3862 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3864 PMD_DRV_LOG(ERR, "failed to allocate memory");
3865 return I40E_ERR_NO_MEMORY;
3868 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3869 if (ret != I40E_SUCCESS)
3872 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3873 if (ret != I40E_SUCCESS)
3876 /* Add the mac addr into mac list */
3877 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3879 PMD_DRV_LOG(ERR, "failed to allocate memory");
3880 ret = I40E_ERR_NO_MEMORY;
3883 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3884 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3895 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3897 struct i40e_mac_filter *f;
3898 struct i40e_macvlan_filter *mv_f;
3900 int ret = I40E_SUCCESS;
3902 /* Can't find it, return an error */
3903 f = i40e_find_mac_filter(vsi, addr);
3905 return I40E_ERR_PARAM;
3907 vlan_num = vsi->vlan_num;
3908 if (vlan_num == 0) {
3909 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
3910 return I40E_ERR_PARAM;
3912 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3914 PMD_DRV_LOG(ERR, "failed to allocate memory");
3915 return I40E_ERR_NO_MEMORY;
3918 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3919 if (ret != I40E_SUCCESS)
3922 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3923 if (ret != I40E_SUCCESS)
3926 /* Remove the mac addr from the mac list */
3927 TAILQ_REMOVE(&vsi->mac_list, f, next);
3937 /* Configure hash enable flags for RSS */
3939 i40e_config_hena(uint64_t flags)
3946 if (flags & ETH_RSS_NONF_IPV4_UDP)
3947 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3948 if (flags & ETH_RSS_NONF_IPV4_TCP)
3949 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3950 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3951 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3952 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3953 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3954 if (flags & ETH_RSS_FRAG_IPV4)
3955 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3956 if (flags & ETH_RSS_NONF_IPV6_UDP)
3957 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3958 if (flags & ETH_RSS_NONF_IPV6_TCP)
3959 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3960 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3961 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3962 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3963 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3964 if (flags & ETH_RSS_FRAG_IPV6)
3965 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3966 if (flags & ETH_RSS_L2_PAYLOAD)
3967 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
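/*
 * Illustrative note (not part of the original source): the translation above
 * is the inverse of i40e_parse_hena() below. For example, a request of
 *
 *     rss_hf = ETH_RSS_NONF_IPV4_TCP | ETH_RSS_NONF_IPV4_UDP
 *
 * sets the I40E_FILTER_PCTYPE_NONF_IPV4_TCP and
 * I40E_FILTER_PCTYPE_NONF_IPV4_UDP bits in the returned hash enable value.
 */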
3972 /* Parse the hash enable flags */
3974 i40e_parse_hena(uint64_t flags)
3976 uint64_t rss_hf = 0;
3981 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3982 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3983 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3984 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3985 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3986 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3987 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3988 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3989 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3990 rss_hf |= ETH_RSS_FRAG_IPV4;
3991 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3992 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3993 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3994 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3995 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3996 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3997 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3998 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3999 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4000 rss_hf |= ETH_RSS_FRAG_IPV6;
4001 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4002 rss_hf |= ETH_RSS_L2_PAYLOAD;
4009 i40e_pf_disable_rss(struct i40e_pf *pf)
4011 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4014 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4015 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4016 hena &= ~I40E_RSS_HENA_ALL;
4017 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4018 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4019 I40E_WRITE_FLUSH(hw);
4023 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4026 uint8_t hash_key_len;
4031 hash_key = (uint32_t *)(rss_conf->rss_key);
4032 hash_key_len = rss_conf->rss_key_len;
4033 if (hash_key != NULL && hash_key_len >=
4034 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4035 /* Fill in RSS hash key */
4036 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4037 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4040 rss_hf = rss_conf->rss_hf;
4041 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4042 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4043 hena &= ~I40E_RSS_HENA_ALL;
4044 hena |= i40e_config_hena(rss_hf);
4045 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4046 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4047 I40E_WRITE_FLUSH(hw);
4053 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4054 struct rte_eth_rss_conf *rss_conf)
4056 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4057 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4060 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4061 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4062 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4063 if (rss_hf != 0) /* Enable RSS */
4065 return 0; /* Nothing to do */
4068 if (rss_hf == 0) /* Disable RSS */
4071 return i40e_hw_rss_hash_set(hw, rss_conf);
4075 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4076 struct rte_eth_rss_conf *rss_conf)
4078 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4079 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4083 if (hash_key != NULL) {
4084 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4085 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4086 rss_conf->rss_key_len = i * sizeof(uint32_t);
4088 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4089 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4090 rss_conf->rss_hf = i40e_parse_hena(hena);
4097 i40e_pf_config_rss(struct i40e_pf *pf)
4099 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4100 struct rte_eth_rss_conf rss_conf;
4101 uint32_t i, lut = 0;
4102 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
4104 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
4107 lut = (lut << 8) | (j & ((0x1 <<
4108 hw->func_caps.rss_table_entry_width) - 1));
4110 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
4113 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
4114 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
4115 i40e_pf_disable_rss(pf);
4118 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
4119 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4120 /* Generate a random default hash key */
4121 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4122 rss_key_default[i] = (uint32_t)rte_rand();
4123 rss_conf.rss_key = (uint8_t *)rss_key_default;
4124 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
4128 return i40e_hw_rss_hash_set(hw, &rss_conf);
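/*
 * Rough sketch of the LUT fill above (not part of the original source):
 * with nb_rx_queues = 6, num = i40e_prev_power_of_2(6) = 4, so the loop
 * spreads queue indexes 0, 1, 2, 3, 0, 1, ... round-robin across the
 * redirection table, packing four 8-bit entries into each I40E_PFQF_HLUT
 * register before it is written.
 */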
4132 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4134 if (!pf->dev_data->sriov.active) {
4135 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
4137 i40e_pf_config_rss(pf);
4140 i40e_pf_disable_rss(pf);
4149 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
4150 enum rte_filter_type filter_type,
4151 enum rte_filter_op filter_op,
4161 switch (filter_type) {
4163 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",