4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
52 #include "i40e_logs.h"
53 #include "i40e/i40e_register_x710_int.h"
54 #include "i40e/i40e_prototype.h"
55 #include "i40e/i40e_adminq_cmd.h"
56 #include "i40e/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
61 /* Maximum number of MAC addresses */
62 #define I40E_NUM_MACADDR_MAX 64
63 #define I40E_CLEAR_PXE_WAIT_MS 200
66 /* Maximum number of capability elements */
66 #define I40E_MAX_CAP_ELE_NUM 128
68 /* Wait count and interval */
69 #define I40E_CHK_Q_ENA_COUNT 1000
70 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
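/*
 * Sketch (an assumption based on the names, not the exact polling loop
 * used elsewhere): queue enable/disable is polled for up to
 * I40E_CHK_Q_ENA_COUNT iterations with I40E_CHK_Q_ENA_INTERVAL_US between
 * them, i.e. roughly 1000 * 1000 us = 1 second of total wait.
 */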
72 /* Maximum number of VSIs */
73 #define I40E_MAX_NUM_VSIS (384UL)
75 /* Bit shift and mask */
76 #define I40E_16_BIT_SHIFT 16
77 #define I40E_16_BIT_MASK 0xFFFF
78 #define I40E_32_BIT_SHIFT 32
79 #define I40E_32_BIT_MASK 0xFFFFFFFF
80 #define I40E_48_BIT_SHIFT 48
81 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
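/*
 * Illustration of how the shift/mask pairs above combine a 48-bit
 * statistics counter from its low/high registers (a sketch, not the
 * exact helper defined later in this file):
 *
 *   uint64_t stat = (uint64_t)lo |
 *           (((uint64_t)hi & I40E_16_BIT_MASK) << I40E_32_BIT_SHIFT);
 *   stat &= I40E_48_BIT_MASK;
 */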
83 /* Default queue interrupt throttling time in microseconds */
84 #define I40E_ITR_INDEX_DEFAULT 0
85 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
86 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
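/*
 * The ITR registers are programmed in 2 us units, so the 8160 us maximum
 * above corresponds to a hardware value of 4080 (0xFF0) and the 32 us
 * default to 16; see i40e_calc_itr_interval() below for the conversion.
 */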
88 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
90 #define I40E_RSS_OFFLOAD_ALL ( \
91 ETH_RSS_NONF_IPV4_UDP | \
92 ETH_RSS_NONF_IPV4_TCP | \
93 ETH_RSS_NONF_IPV4_SCTP | \
94 ETH_RSS_NONF_IPV4_OTHER | \
96 ETH_RSS_NONF_IPV6_UDP | \
97 ETH_RSS_NONF_IPV6_TCP | \
98 ETH_RSS_NONF_IPV6_SCTP | \
99 ETH_RSS_NONF_IPV6_OTHER | \
100 ETH_RSS_FRAG_IPV6 | \
103 /* All bits of RSS hash enable */
104 #define I40E_RSS_HENA_ALL ( \
105 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
106 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
107 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
108 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
109 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
110 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
111 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
112 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
113 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
114 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
115 (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
116 (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
117 (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
118 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
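/*
 * Hedged sketch of how I40E_RSS_HENA_ALL is typically consumed: the 64-bit
 * bit-field is split across the two 32-bit PFQF_HENA registers (the exact
 * writes live in the RSS configuration code and may differ in detail):
 *
 *   uint64_t hena = I40E_RSS_HENA_ALL;
 *   I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
 *   I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
 */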
120 static int eth_i40e_dev_init(\
121 __attribute__((unused)) struct eth_driver *eth_drv,
122 struct rte_eth_dev *eth_dev);
123 static int i40e_dev_configure(struct rte_eth_dev *dev);
124 static int i40e_dev_start(struct rte_eth_dev *dev);
125 static void i40e_dev_stop(struct rte_eth_dev *dev);
126 static void i40e_dev_close(struct rte_eth_dev *dev);
127 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
128 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
129 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
130 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
131 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
132 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
133 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
134 struct rte_eth_stats *stats);
135 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
136 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
140 static void i40e_dev_info_get(struct rte_eth_dev *dev,
141 struct rte_eth_dev_info *dev_info);
142 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
145 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
146 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
147 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
150 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
151 static int i40e_dev_led_on(struct rte_eth_dev *dev);
152 static int i40e_dev_led_off(struct rte_eth_dev *dev);
153 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
154 struct rte_eth_fc_conf *fc_conf);
155 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
156 struct rte_eth_pfc_conf *pfc_conf);
157 static void i40e_macaddr_add(struct rte_eth_dev *dev,
158 struct ether_addr *mac_addr,
161 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
162 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
163 struct rte_eth_rss_reta *reta_conf);
164 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
165 struct rte_eth_rss_reta *reta_conf);
167 static int i40e_get_cap(struct i40e_hw *hw);
168 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
169 static int i40e_pf_setup(struct i40e_pf *pf);
170 static int i40e_vsi_init(struct i40e_vsi *vsi);
171 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
172 bool offset_loaded, uint64_t *offset, uint64_t *stat);
173 static void i40e_stat_update_48(struct i40e_hw *hw,
179 static void i40e_pf_config_irq0(struct i40e_hw *hw);
180 static void i40e_dev_interrupt_handler(
181 __rte_unused struct rte_intr_handle *handle, void *param);
182 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
183 uint32_t base, uint32_t num);
184 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
185 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
187 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
189 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
190 static int i40e_veb_release(struct i40e_veb *veb);
191 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
192 struct i40e_vsi *vsi);
193 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
194 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
195 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
196 struct i40e_macvlan_filter *mv_f,
198 struct ether_addr *addr);
199 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
200 struct i40e_macvlan_filter *mv_f,
203 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
204 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
205 struct rte_eth_rss_conf *rss_conf);
206 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
207 struct rte_eth_rss_conf *rss_conf);
209 /* Default hash key buffer for RSS */
210 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
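/*
 * The PCI ID table below is generated by including rte_pci_dev_ids.h with
 * RTE_PCI_DEV_ID_DECL_I40E defined: each i40e vendor/device ID pair listed
 * there expands into one {RTE_PCI_DEVICE(vend, dev)} initializer, and the
 * all-zero entry terminates the table.
 */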
212 static struct rte_pci_id pci_id_i40e_map[] = {
213 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
214 #include "rte_pci_dev_ids.h"
215 { .vendor_id = 0, /* sentinel */ },
218 static struct eth_dev_ops i40e_eth_dev_ops = {
219 .dev_configure = i40e_dev_configure,
220 .dev_start = i40e_dev_start,
221 .dev_stop = i40e_dev_stop,
222 .dev_close = i40e_dev_close,
223 .promiscuous_enable = i40e_dev_promiscuous_enable,
224 .promiscuous_disable = i40e_dev_promiscuous_disable,
225 .allmulticast_enable = i40e_dev_allmulticast_enable,
226 .allmulticast_disable = i40e_dev_allmulticast_disable,
227 .dev_set_link_up = i40e_dev_set_link_up,
228 .dev_set_link_down = i40e_dev_set_link_down,
229 .link_update = i40e_dev_link_update,
230 .stats_get = i40e_dev_stats_get,
231 .stats_reset = i40e_dev_stats_reset,
232 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
233 .dev_infos_get = i40e_dev_info_get,
234 .vlan_filter_set = i40e_vlan_filter_set,
235 .vlan_tpid_set = i40e_vlan_tpid_set,
236 .vlan_offload_set = i40e_vlan_offload_set,
237 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
238 .vlan_pvid_set = i40e_vlan_pvid_set,
239 .rx_queue_start = i40e_dev_rx_queue_start,
240 .rx_queue_stop = i40e_dev_rx_queue_stop,
241 .tx_queue_start = i40e_dev_tx_queue_start,
242 .tx_queue_stop = i40e_dev_tx_queue_stop,
243 .rx_queue_setup = i40e_dev_rx_queue_setup,
244 .rx_queue_release = i40e_dev_rx_queue_release,
245 .rx_queue_count = i40e_dev_rx_queue_count,
246 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
247 .tx_queue_setup = i40e_dev_tx_queue_setup,
248 .tx_queue_release = i40e_dev_tx_queue_release,
249 .dev_led_on = i40e_dev_led_on,
250 .dev_led_off = i40e_dev_led_off,
251 .flow_ctrl_set = i40e_flow_ctrl_set,
252 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
253 .mac_addr_add = i40e_macaddr_add,
254 .mac_addr_remove = i40e_macaddr_remove,
255 .reta_update = i40e_dev_rss_reta_update,
256 .reta_query = i40e_dev_rss_reta_query,
257 .rss_hash_update = i40e_dev_rss_hash_update,
258 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
261 static struct eth_driver rte_i40e_pmd = {
263 .name = "rte_i40e_pmd",
264 .id_table = pci_id_i40e_map,
265 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
267 .eth_dev_init = eth_i40e_dev_init,
268 .dev_private_size = sizeof(struct i40e_adapter),
272 i40e_prev_power_of_2(int n)
290 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
291 struct rte_eth_link *link)
293 struct rte_eth_link *dst = link;
294 struct rte_eth_link *src = &(dev->data->dev_link);
296 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
297 *(uint64_t *)src) == 0)
304 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
305 struct rte_eth_link *link)
307 struct rte_eth_link *dst = &(dev->data->dev_link);
308 struct rte_eth_link *src = link;
310 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
311 *(uint64_t *)src) == 0)
318 * Driver initialization routine.
319 * Invoked once at EAL init time.
320 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
323 rte_i40e_pmd_init(const char *name __rte_unused,
324 const char *params __rte_unused)
326 PMD_INIT_FUNC_TRACE();
327 rte_eth_driver_register(&rte_i40e_pmd);
332 static struct rte_driver rte_i40e_driver = {
334 .init = rte_i40e_pmd_init,
337 PMD_REGISTER_DRIVER(rte_i40e_driver);
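/*
 * Minimal usage sketch from an application's point of view (assuming only
 * the generic DPDK ethdev API of this era, nothing i40e-specific): once the
 * driver is registered, a port is brought up through the usual sequence,
 * which ends up in i40e_dev_configure()/i40e_dev_start() below:
 *
 *   rte_eal_init(argc, argv);
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *   rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id, &rx_conf, mbuf_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket_id, &tx_conf);
 *   rte_eth_dev_start(port_id);
 */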
340 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
341 struct rte_eth_dev *dev)
343 struct rte_pci_device *pci_dev;
344 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
345 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
346 struct i40e_vsi *vsi;
351 PMD_INIT_FUNC_TRACE();
353 dev->dev_ops = &i40e_eth_dev_ops;
354 dev->rx_pkt_burst = i40e_recv_pkts;
355 dev->tx_pkt_burst = i40e_xmit_pkts;
357 /* for secondary processes, we don't initialise any further as primary
358 * has already done this work. Only check we don't need a different RX function. */
360 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
361 if (dev->data->scattered_rx)
362 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
365 pci_dev = dev->pci_dev;
366 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
367 pf->adapter->eth_dev = dev;
368 pf->dev_data = dev->data;
370 hw->back = I40E_PF_TO_ADAPTER(pf);
371 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
373 PMD_INIT_LOG(ERR, "Hardware is not available, "
374 "as address is NULL");
378 hw->vendor_id = pci_dev->id.vendor_id;
379 hw->device_id = pci_dev->id.device_id;
380 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
381 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
382 hw->bus.device = pci_dev->addr.devid;
383 hw->bus.func = pci_dev->addr.function;
385 /* Make sure all is clean before doing PF reset */
388 /* Reset here to make sure all is clean for each PF */
389 ret = i40e_pf_reset(hw);
391 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
395 /* Initialize the shared code (base driver) */
396 ret = i40e_init_shared_code(hw);
398 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
402 /* Initialize the parameters for adminq */
403 i40e_init_adminq_parameter(hw);
404 ret = i40e_init_adminq(hw);
405 if (ret != I40E_SUCCESS) {
406 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
409 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
410 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
411 hw->aq.api_maj_ver, hw->aq.api_min_ver,
412 ((hw->nvm.version >> 12) & 0xf),
413 ((hw->nvm.version >> 4) & 0xff),
414 (hw->nvm.version & 0xf), hw->nvm.eetrack);
417 ret = i40e_aq_stop_lldp(hw, true, NULL);
418 if (ret != I40E_SUCCESS) /* Its failure can be ignored */
419 PMD_INIT_LOG(INFO, "Failed to stop lldp");
422 i40e_clear_pxe_mode(hw);
424 /* Get hw capabilities */
425 ret = i40e_get_cap(hw);
426 if (ret != I40E_SUCCESS) {
427 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
428 goto err_get_capabilities;
431 /* Initialize parameters for PF */
432 ret = i40e_pf_parameter_init(dev);
434 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
435 goto err_parameter_init;
438 /* Initialize the queue management */
439 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
441 PMD_INIT_LOG(ERR, "Failed to init queue pool");
442 goto err_qp_pool_init;
444 ret = i40e_res_pool_init(&pf->msix_pool, 1,
445 hw->func_caps.num_msix_vectors - 1);
447 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
448 goto err_msix_pool_init;
451 /* Initialize lan hmc */
452 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
453 hw->func_caps.num_rx_qp, 0, 0);
454 if (ret != I40E_SUCCESS) {
455 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
456 goto err_init_lan_hmc;
459 /* Configure lan hmc */
460 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
461 if (ret != I40E_SUCCESS) {
462 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
463 goto err_configure_lan_hmc;
466 /* Get and check the mac address */
467 i40e_get_mac_addr(hw, hw->mac.addr);
468 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
469 PMD_INIT_LOG(ERR, "mac address is not valid");
471 goto err_get_mac_addr;
473 /* Copy the permanent MAC address */
474 ether_addr_copy((struct ether_addr *) hw->mac.addr,
475 (struct ether_addr *) hw->mac.perm_addr);
477 /* Disable flow control */
478 hw->fc.requested_mode = I40E_FC_NONE;
479 i40e_set_fc(hw, &aq_fail, TRUE);
481 /* PF setup, which includes VSI setup */
482 ret = i40e_pf_setup(pf);
484 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
485 goto err_setup_pf_switch;
490 /* Disable double vlan by default */
491 i40e_vsi_config_double_vlan(vsi, FALSE);
493 if (!vsi->max_macaddrs)
494 len = ETHER_ADDR_LEN;
496 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
498 /* Should be after VSI initialized */
499 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
500 if (!dev->data->mac_addrs) {
501 PMD_INIT_LOG(ERR, "Failed to allocate memory "
502 "for storing mac address");
503 goto err_get_mac_addr;
505 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
506 &dev->data->mac_addrs[0]);
508 /* Initialize PF host driver to set up SR-IOV resources if applicable */
509 i40e_pf_host_init(dev);
511 /* register callback func to eal lib */
512 rte_intr_callback_register(&(pci_dev->intr_handle),
513 i40e_dev_interrupt_handler, (void *)dev);
515 /* configure and enable device interrupt */
516 i40e_pf_config_irq0(hw);
517 i40e_pf_enable_irq0(hw);
519 /* enable uio intr after callback register */
520 rte_intr_enable(&(pci_dev->intr_handle));
525 rte_free(pf->main_vsi);
527 err_configure_lan_hmc:
528 (void)i40e_shutdown_lan_hmc(hw);
530 i40e_res_pool_destroy(&pf->msix_pool);
532 i40e_res_pool_destroy(&pf->qp_pool);
535 err_get_capabilities:
536 (void)i40e_shutdown_adminq(hw);
542 i40e_dev_configure(struct rte_eth_dev *dev)
544 return i40e_dev_init_vlan(dev);
548 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
550 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
551 uint16_t msix_vect = vsi->msix_intr;
554 for (i = 0; i < vsi->nb_qps; i++) {
555 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
556 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
560 if (vsi->type != I40E_VSI_SRIOV) {
561 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
562 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
566 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
567 vsi->user_param + (msix_vect - 1);
569 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
571 I40E_WRITE_FLUSH(hw);
574 static inline uint16_t
575 i40e_calc_itr_interval(int16_t interval)
577 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
578 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
580 /* Convert to hardware count; each unit written represents 2 us */
585 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
588 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
589 uint16_t msix_vect = vsi->msix_intr;
590 uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
593 for (i = 0; i < vsi->nb_qps; i++)
594 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
596 /* Bind all RX queues to allocated MSIX interrupt */
597 for (i = 0; i < vsi->nb_qps; i++) {
598 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
599 (interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
600 ((vsi->base_queue + i + 1) <<
601 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
602 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
603 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
605 if (i == vsi->nb_qps - 1)
606 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
607 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
610 /* Write first RX queue to Link list register as the head element */
611 if (vsi->type != I40E_VSI_SRIOV) {
612 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
613 (vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
614 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
616 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
617 msix_vect - 1), interval);
619 /* Disable auto-mask on enabling of all non-zero interrupts */
620 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
621 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
625 /* num_msix_vectors_vf includes IRQ0, so subtract one */
626 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
627 vsi->user_param + (msix_vect - 1);
629 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
630 (vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
631 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
634 I40E_WRITE_FLUSH(hw);
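	/*
	 * Summary of the writes above: the VSI's RX queues now form a chain
	 * headed by the (PFINT/VPINT) LNKLSTN register, with each QINT_RQCTL
	 * pointing at the next queue via NEXTQ_INDX and the last entry
	 * terminated, so a single MSI-X vector services every queue in the
	 * chain.
	 */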
638 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
640 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
641 uint16_t interval = i40e_calc_itr_interval(\
642 RTE_LIBRTE_I40E_ITR_INTERVAL);
644 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
645 I40E_PFINT_DYN_CTLN_INTENA_MASK |
646 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
647 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
648 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
652 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
654 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
656 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
659 static inline uint8_t
660 i40e_parse_link_speed(uint16_t eth_link_speed)
662 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
664 switch (eth_link_speed) {
665 case ETH_LINK_SPEED_40G:
666 link_speed = I40E_LINK_SPEED_40GB;
668 case ETH_LINK_SPEED_20G:
669 link_speed = I40E_LINK_SPEED_20GB;
671 case ETH_LINK_SPEED_10G:
672 link_speed = I40E_LINK_SPEED_10GB;
674 case ETH_LINK_SPEED_1000:
675 link_speed = I40E_LINK_SPEED_1GB;
677 case ETH_LINK_SPEED_100:
678 link_speed = I40E_LINK_SPEED_100MB;
686 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
688 enum i40e_status_code status;
689 struct i40e_aq_get_phy_abilities_resp phy_ab;
690 struct i40e_aq_set_phy_config phy_conf;
691 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
692 I40E_AQ_PHY_FLAG_PAUSE_RX |
693 I40E_AQ_PHY_FLAG_LOW_POWER;
694 const uint8_t advt = I40E_LINK_SPEED_40GB |
695 I40E_LINK_SPEED_10GB |
696 I40E_LINK_SPEED_1GB |
697 I40E_LINK_SPEED_100MB;
700 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
705 memset(&phy_conf, 0, sizeof(phy_conf));
707 /* bits 0-2 use the values from get_phy_abilities_resp */
709 abilities |= phy_ab.abilities & mask;
711 /* update abilities and speed */
712 if (abilities & I40E_AQ_PHY_AN_ENABLED)
713 phy_conf.link_speed = advt;
715 phy_conf.link_speed = force_speed;
717 phy_conf.abilities = abilities;
719 /* use get_phy_abilities_resp value for the rest */
720 phy_conf.phy_type = phy_ab.phy_type;
721 phy_conf.eee_capability = phy_ab.eee_capability;
722 phy_conf.eeer = phy_ab.eeer_val;
723 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
725 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
726 phy_ab.abilities, phy_ab.link_speed);
727 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
728 phy_conf.abilities, phy_conf.link_speed);
730 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
738 i40e_apply_link_speed(struct rte_eth_dev *dev)
741 uint8_t abilities = 0;
742 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
743 struct rte_eth_conf *conf = &dev->data->dev_conf;
745 speed = i40e_parse_link_speed(conf->link_speed);
746 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
747 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
748 abilities |= I40E_AQ_PHY_AN_ENABLED;
750 abilities |= I40E_AQ_PHY_LINK_ENABLED;
752 return i40e_phy_conf_link(hw, abilities, speed);
756 i40e_dev_start(struct rte_eth_dev *dev)
758 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
759 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
760 struct i40e_vsi *vsi = pf->main_vsi;
763 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
764 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
765 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
766 dev->data->dev_conf.link_duplex,
772 ret = i40e_vsi_init(vsi);
773 if (ret != I40E_SUCCESS) {
774 PMD_DRV_LOG(ERR, "Failed to init VSI");
778 /* Map queues with MSIX interrupt */
779 i40e_vsi_queues_bind_intr(vsi);
780 i40e_vsi_enable_queues_intr(vsi);
782 /* Enable all queues which have been configured */
783 ret = i40e_vsi_switch_queues(vsi, TRUE);
784 if (ret != I40E_SUCCESS) {
785 PMD_DRV_LOG(ERR, "Failed to enable VSI");
789 /* Enable receiving broadcast packets */
790 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
791 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
792 if (ret != I40E_SUCCESS)
793 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
796 /* Apply link configuration */
797 ret = i40e_apply_link_speed(dev);
798 if (I40E_SUCCESS != ret) {
799 PMD_DRV_LOG(ERR, "Failed to apply link setting");
806 i40e_vsi_switch_queues(vsi, FALSE);
812 i40e_dev_stop(struct rte_eth_dev *dev)
814 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
815 struct i40e_vsi *vsi = pf->main_vsi;
817 /* Disable all queues */
818 i40e_vsi_switch_queues(vsi, FALSE);
821 i40e_dev_set_link_down(dev);
823 /* un-map queues with interrupt registers */
824 i40e_vsi_disable_queues_intr(vsi);
825 i40e_vsi_queues_unbind_intr(vsi);
829 i40e_dev_close(struct rte_eth_dev *dev)
831 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
832 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
835 PMD_INIT_FUNC_TRACE();
839 /* Disable interrupt */
840 i40e_pf_disable_irq0(hw);
841 rte_intr_disable(&(dev->pci_dev->intr_handle));
843 /* shutdown and destroy the HMC */
844 i40e_shutdown_lan_hmc(hw);
846 /* release all the existing VSIs and VEBs */
847 i40e_vsi_release(pf->main_vsi);
849 /* shutdown the adminq */
850 i40e_aq_queue_shutdown(hw, true);
851 i40e_shutdown_adminq(hw);
853 i40e_res_pool_destroy(&pf->qp_pool);
854 i40e_res_pool_destroy(&pf->msix_pool);
856 /* force a PF reset to clean anything leftover */
857 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
858 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
859 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
860 I40E_WRITE_FLUSH(hw);
864 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
866 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
867 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
868 struct i40e_vsi *vsi = pf->main_vsi;
871 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
873 if (status != I40E_SUCCESS)
874 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
876 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
878 if (status != I40E_SUCCESS)
879 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
884 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
886 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
887 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
888 struct i40e_vsi *vsi = pf->main_vsi;
891 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
893 if (status != I40E_SUCCESS)
894 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
896 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
898 if (status != I40E_SUCCESS)
899 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
903 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
905 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
906 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
907 struct i40e_vsi *vsi = pf->main_vsi;
910 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
911 if (ret != I40E_SUCCESS)
912 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
916 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
918 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
919 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
920 struct i40e_vsi *vsi = pf->main_vsi;
923 if (dev->data->promiscuous == 1)
924 return; /* must remain in all_multicast mode */
926 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
927 vsi->seid, FALSE, NULL);
928 if (ret != I40E_SUCCESS)
929 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
933 * Set device link up.
936 i40e_dev_set_link_up(struct rte_eth_dev *dev)
938 /* re-apply link speed setting */
939 return i40e_apply_link_speed(dev);
943 * Set device link down.
946 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
948 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
949 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
950 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
952 return i40e_phy_conf_link(hw, abilities, speed);
956 i40e_dev_link_update(struct rte_eth_dev *dev,
957 __rte_unused int wait_to_complete)
959 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
960 struct i40e_link_status link_status;
961 struct rte_eth_link link, old;
964 memset(&link, 0, sizeof(link));
965 memset(&old, 0, sizeof(old));
966 memset(&link_status, 0, sizeof(link_status));
967 rte_i40e_dev_atomic_read_link_status(dev, &old);
969 /* Get link status information from hardware */
970 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
971 if (status != I40E_SUCCESS) {
972 link.link_speed = ETH_LINK_SPEED_100;
973 link.link_duplex = ETH_LINK_FULL_DUPLEX;
974 PMD_DRV_LOG(ERR, "Failed to get link info");
978 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
980 if (!link.link_status)
983 /* i40e uses full duplex only */
984 link.link_duplex = ETH_LINK_FULL_DUPLEX;
986 /* Parse the link status */
987 switch (link_status.link_speed) {
988 case I40E_LINK_SPEED_100MB:
989 link.link_speed = ETH_LINK_SPEED_100;
991 case I40E_LINK_SPEED_1GB:
992 link.link_speed = ETH_LINK_SPEED_1000;
994 case I40E_LINK_SPEED_10GB:
995 link.link_speed = ETH_LINK_SPEED_10G;
997 case I40E_LINK_SPEED_20GB:
998 link.link_speed = ETH_LINK_SPEED_20G;
1000 case I40E_LINK_SPEED_40GB:
1001 link.link_speed = ETH_LINK_SPEED_40G;
1004 link.link_speed = ETH_LINK_SPEED_100;
1009 rte_i40e_dev_atomic_write_link_status(dev, &link);
1010 if (link.link_status == old.link_status)
1016 /* Get all the statistics of a VSI */
1018 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1020 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1021 struct i40e_eth_stats *nes = &vsi->eth_stats;
1022 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1023 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1025 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1026 vsi->offset_loaded, &oes->rx_bytes,
1028 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1029 vsi->offset_loaded, &oes->rx_unicast,
1031 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1032 vsi->offset_loaded, &oes->rx_multicast,
1033 &nes->rx_multicast);
1034 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1035 vsi->offset_loaded, &oes->rx_broadcast,
1036 &nes->rx_broadcast);
1037 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1038 &oes->rx_discards, &nes->rx_discards);
1039 /* GLV_REPC not supported */
1040 /* GLV_RMPC not supported */
1041 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1042 &oes->rx_unknown_protocol,
1043 &nes->rx_unknown_protocol);
1044 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1045 vsi->offset_loaded, &oes->tx_bytes,
1047 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1048 vsi->offset_loaded, &oes->tx_unicast,
1050 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1051 vsi->offset_loaded, &oes->tx_multicast,
1052 &nes->tx_multicast);
1053 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1054 vsi->offset_loaded, &oes->tx_broadcast,
1055 &nes->tx_broadcast);
1056 /* GLV_TDPC not supported */
1057 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1058 &oes->tx_errors, &nes->tx_errors);
1059 vsi->offset_loaded = true;
1061 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1063 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
1064 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
1065 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
1066 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
1067 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
1068 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1069 nes->rx_unknown_protocol);
1070 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
1071 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
1072 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
1073 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
1074 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
1075 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
1076 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1080 /* Get all statistics of a port */
1082 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1085 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1086 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1087 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1088 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1090 /* Get statistics of struct i40e_eth_stats */
1091 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1092 I40E_GLPRT_GORCL(hw->port),
1093 pf->offset_loaded, &os->eth.rx_bytes,
1095 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1096 I40E_GLPRT_UPRCL(hw->port),
1097 pf->offset_loaded, &os->eth.rx_unicast,
1098 &ns->eth.rx_unicast);
1099 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1100 I40E_GLPRT_MPRCL(hw->port),
1101 pf->offset_loaded, &os->eth.rx_multicast,
1102 &ns->eth.rx_multicast);
1103 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1104 I40E_GLPRT_BPRCL(hw->port),
1105 pf->offset_loaded, &os->eth.rx_broadcast,
1106 &ns->eth.rx_broadcast);
1107 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1108 pf->offset_loaded, &os->eth.rx_discards,
1109 &ns->eth.rx_discards);
1110 /* GLPRT_REPC not supported */
1111 /* GLPRT_RMPC not supported */
1112 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1114 &os->eth.rx_unknown_protocol,
1115 &ns->eth.rx_unknown_protocol);
1116 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1117 I40E_GLPRT_GOTCL(hw->port),
1118 pf->offset_loaded, &os->eth.tx_bytes,
1120 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1121 I40E_GLPRT_UPTCL(hw->port),
1122 pf->offset_loaded, &os->eth.tx_unicast,
1123 &ns->eth.tx_unicast);
1124 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1125 I40E_GLPRT_MPTCL(hw->port),
1126 pf->offset_loaded, &os->eth.tx_multicast,
1127 &ns->eth.tx_multicast);
1128 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1129 I40E_GLPRT_BPTCL(hw->port),
1130 pf->offset_loaded, &os->eth.tx_broadcast,
1131 &ns->eth.tx_broadcast);
1132 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1133 pf->offset_loaded, &os->eth.tx_discards,
1134 &ns->eth.tx_discards);
1135 /* GLPRT_TEPC not supported */
1137 /* additional port specific stats */
1138 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1139 pf->offset_loaded, &os->tx_dropped_link_down,
1140 &ns->tx_dropped_link_down);
1141 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1142 pf->offset_loaded, &os->crc_errors,
1144 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1145 pf->offset_loaded, &os->illegal_bytes,
1146 &ns->illegal_bytes);
1147 /* GLPRT_ERRBC not supported */
1148 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1149 pf->offset_loaded, &os->mac_local_faults,
1150 &ns->mac_local_faults);
1151 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1152 pf->offset_loaded, &os->mac_remote_faults,
1153 &ns->mac_remote_faults);
1154 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1155 pf->offset_loaded, &os->rx_length_errors,
1156 &ns->rx_length_errors);
1157 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1158 pf->offset_loaded, &os->link_xon_rx,
1160 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1161 pf->offset_loaded, &os->link_xoff_rx,
1163 for (i = 0; i < 8; i++) {
1164 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1166 &os->priority_xon_rx[i],
1167 &ns->priority_xon_rx[i]);
1168 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1170 &os->priority_xoff_rx[i],
1171 &ns->priority_xoff_rx[i]);
1173 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1174 pf->offset_loaded, &os->link_xon_tx,
1176 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1177 pf->offset_loaded, &os->link_xoff_tx,
1179 for (i = 0; i < 8; i++) {
1180 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1182 &os->priority_xon_tx[i],
1183 &ns->priority_xon_tx[i]);
1184 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1186 &os->priority_xoff_tx[i],
1187 &ns->priority_xoff_tx[i]);
1188 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1190 &os->priority_xon_2_xoff[i],
1191 &ns->priority_xon_2_xoff[i]);
1193 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1194 I40E_GLPRT_PRC64L(hw->port),
1195 pf->offset_loaded, &os->rx_size_64,
1197 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1198 I40E_GLPRT_PRC127L(hw->port),
1199 pf->offset_loaded, &os->rx_size_127,
1201 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1202 I40E_GLPRT_PRC255L(hw->port),
1203 pf->offset_loaded, &os->rx_size_255,
1205 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1206 I40E_GLPRT_PRC511L(hw->port),
1207 pf->offset_loaded, &os->rx_size_511,
1209 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1210 I40E_GLPRT_PRC1023L(hw->port),
1211 pf->offset_loaded, &os->rx_size_1023,
1213 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1214 I40E_GLPRT_PRC1522L(hw->port),
1215 pf->offset_loaded, &os->rx_size_1522,
1217 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1218 I40E_GLPRT_PRC9522L(hw->port),
1219 pf->offset_loaded, &os->rx_size_big,
1221 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1222 pf->offset_loaded, &os->rx_undersize,
1224 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1225 pf->offset_loaded, &os->rx_fragments,
1227 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1228 pf->offset_loaded, &os->rx_oversize,
1230 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1231 pf->offset_loaded, &os->rx_jabber,
1233 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1234 I40E_GLPRT_PTC64L(hw->port),
1235 pf->offset_loaded, &os->tx_size_64,
1237 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1238 I40E_GLPRT_PTC127L(hw->port),
1239 pf->offset_loaded, &os->tx_size_127,
1241 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1242 I40E_GLPRT_PTC255L(hw->port),
1243 pf->offset_loaded, &os->tx_size_255,
1245 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1246 I40E_GLPRT_PTC511L(hw->port),
1247 pf->offset_loaded, &os->tx_size_511,
1249 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1250 I40E_GLPRT_PTC1023L(hw->port),
1251 pf->offset_loaded, &os->tx_size_1023,
1253 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1254 I40E_GLPRT_PTC1522L(hw->port),
1255 pf->offset_loaded, &os->tx_size_1522,
1257 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1258 I40E_GLPRT_PTC9522L(hw->port),
1259 pf->offset_loaded, &os->tx_size_big,
1261 /* GLPRT_MSPDC not supported */
1262 /* GLPRT_XEC not supported */
1264 pf->offset_loaded = true;
1266 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1267 ns->eth.rx_broadcast;
1268 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1269 ns->eth.tx_broadcast;
1270 stats->ibytes = ns->eth.rx_bytes;
1271 stats->obytes = ns->eth.tx_bytes;
1272 stats->oerrors = ns->eth.tx_errors;
1273 stats->imcasts = ns->eth.rx_multicast;
1276 i40e_update_vsi_stats(pf->main_vsi);
1278 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1279 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
1280 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
1281 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
1282 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
1283 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
1284 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1285 ns->eth.rx_unknown_protocol);
1286 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
1287 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
1288 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
1289 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
1290 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
1291 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
1293 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
1294 ns->tx_dropped_link_down);
1295 PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
1296 PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
1298 PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
1299 PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
1300 ns->mac_local_faults);
1301 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
1302 ns->mac_remote_faults);
1303 PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
1304 ns->rx_length_errors);
1305 PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
1306 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
1307 for (i = 0; i < 8; i++) {
1308 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
1309 i, ns->priority_xon_rx[i]);
1310 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
1311 i, ns->priority_xoff_rx[i]);
1313 PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
1314 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
1315 for (i = 0; i < 8; i++) {
1316 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
1317 i, ns->priority_xon_tx[i]);
1318 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
1319 i, ns->priority_xoff_tx[i]);
1320 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
1321 i, ns->priority_xon_2_xoff[i]);
1323 PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
1324 PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
1325 PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
1326 PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
1327 PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
1328 PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
1329 PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
1330 PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
1331 PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
1332 PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
1333 PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
1334 PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
1335 PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
1336 PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
1337 PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
1338 PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
1339 PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
1340 PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
1341 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
1342 ns->mac_short_packet_dropped);
1343 PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
1344 ns->checksum_error);
1345 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1348 /* Reset the statistics */
1350 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1352 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1354 /* It results in reloading the start point of each counter */
1355 pf->offset_loaded = false;
1359 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1360 __rte_unused uint16_t queue_id,
1361 __rte_unused uint8_t stat_idx,
1362 __rte_unused uint8_t is_rx)
1364 PMD_INIT_FUNC_TRACE();
1370 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1372 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1373 struct i40e_vsi *vsi = pf->main_vsi;
1375 dev_info->max_rx_queues = vsi->nb_qps;
1376 dev_info->max_tx_queues = vsi->nb_qps;
1377 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1378 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1379 dev_info->max_mac_addrs = vsi->max_macaddrs;
1380 dev_info->max_vfs = dev->pci_dev->max_vfs;
1381 dev_info->rx_offload_capa =
1382 DEV_RX_OFFLOAD_VLAN_STRIP |
1383 DEV_RX_OFFLOAD_IPV4_CKSUM |
1384 DEV_RX_OFFLOAD_UDP_CKSUM |
1385 DEV_RX_OFFLOAD_TCP_CKSUM;
1386 dev_info->tx_offload_capa =
1387 DEV_TX_OFFLOAD_VLAN_INSERT |
1388 DEV_TX_OFFLOAD_IPV4_CKSUM |
1389 DEV_TX_OFFLOAD_UDP_CKSUM |
1390 DEV_TX_OFFLOAD_TCP_CKSUM |
1391 DEV_TX_OFFLOAD_SCTP_CKSUM;
1395 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1397 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1398 struct i40e_vsi *vsi = pf->main_vsi;
1399 PMD_INIT_FUNC_TRACE();
1402 return i40e_vsi_add_vlan(vsi, vlan_id);
1404 return i40e_vsi_delete_vlan(vsi, vlan_id);
1408 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1409 __rte_unused uint16_t tpid)
1411 PMD_INIT_FUNC_TRACE();
1415 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1417 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1418 struct i40e_vsi *vsi = pf->main_vsi;
1420 if (mask & ETH_VLAN_STRIP_MASK) {
1421 /* Enable or disable VLAN stripping */
1422 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1423 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1425 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1428 if (mask & ETH_VLAN_EXTEND_MASK) {
1429 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1430 i40e_vsi_config_double_vlan(vsi, TRUE);
1432 i40e_vsi_config_double_vlan(vsi, FALSE);
1437 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1438 __rte_unused uint16_t queue,
1439 __rte_unused int on)
1441 PMD_INIT_FUNC_TRACE();
1445 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1447 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1448 struct i40e_vsi *vsi = pf->main_vsi;
1449 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1450 struct i40e_vsi_vlan_pvid_info info;
1452 memset(&info, 0, sizeof(info));
1455 info.config.pvid = pvid;
1457 info.config.reject.tagged =
1458 data->dev_conf.txmode.hw_vlan_reject_tagged;
1459 info.config.reject.untagged =
1460 data->dev_conf.txmode.hw_vlan_reject_untagged;
1463 return i40e_vsi_vlan_pvid_set(vsi, &info);
1467 i40e_dev_led_on(struct rte_eth_dev *dev)
1469 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1470 uint32_t mode = i40e_led_get(hw);
1473 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
1479 i40e_dev_led_off(struct rte_eth_dev *dev)
1481 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1482 uint32_t mode = i40e_led_get(hw);
1485 i40e_led_set(hw, 0, false);
1491 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1492 __rte_unused struct rte_eth_fc_conf *fc_conf)
1494 PMD_INIT_FUNC_TRACE();
1500 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1501 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1503 PMD_INIT_FUNC_TRACE();
1508 /* Add a MAC address, and update filters */
1510 i40e_macaddr_add(struct rte_eth_dev *dev,
1511 struct ether_addr *mac_addr,
1512 __attribute__((unused)) uint32_t index,
1513 __attribute__((unused)) uint32_t pool)
1515 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1516 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1517 struct i40e_vsi *vsi = pf->main_vsi;
1518 struct ether_addr old_mac;
1521 if (!is_valid_assigned_ether_addr(mac_addr)) {
1522 PMD_DRV_LOG(ERR, "Invalid ethernet address");
1526 if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1527 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
1531 /* Write mac address */
1532 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1533 mac_addr->addr_bytes, NULL);
1534 if (ret != I40E_SUCCESS) {
1535 PMD_DRV_LOG(ERR, "Failed to write mac address");
1539 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1540 (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1543 ret = i40e_vsi_add_mac(vsi, mac_addr);
1544 if (ret != I40E_SUCCESS) {
1545 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1549 ether_addr_copy(mac_addr, &pf->dev_addr);
1550 i40e_vsi_delete_mac(vsi, &old_mac);
1553 /* Remove a MAC address, and update filters */
1555 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1557 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1558 struct i40e_vsi *vsi = pf->main_vsi;
1559 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1560 struct ether_addr *macaddr;
1562 struct i40e_hw *hw =
1563 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1565 if (index >= vsi->max_macaddrs)
1568 macaddr = &(data->mac_addrs[index]);
1569 if (!is_valid_assigned_ether_addr(macaddr))
1572 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1573 hw->mac.perm_addr, NULL);
1574 if (ret != I40E_SUCCESS) {
1575 PMD_DRV_LOG(ERR, "Failed to write mac address");
1579 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
1581 ret = i40e_vsi_delete_mac(vsi, macaddr);
1582 if (ret != I40E_SUCCESS)
1585 /* Clear device address as it has been removed */
1586 if (is_same_ether_addr(&(pf->dev_addr), macaddr))
1587 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1591 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1592 struct rte_eth_rss_reta *reta_conf)
1594 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1596 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
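	/*
	 * Each 32-bit PFQF_HLUT register holds four one-byte RETA entries;
	 * in the loop below i >> 2 selects the register and 8 * j the byte
	 * lane within it, with unmasked lanes preserved from the old value.
	 */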
1598 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1600 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1602 mask = (uint8_t)((reta_conf->mask_hi >>
1611 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1613 for (j = 0, lut = 0; j < 4; j++) {
1614 if (mask & (0x1 << j))
1615 lut |= reta_conf->reta[i + j] << (8 * j);
1617 lut |= l & (0xFF << (8 * j));
1619 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1626 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1627 struct rte_eth_rss_reta *reta_conf)
1629 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1631 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1633 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1635 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1637 mask = (uint8_t)((reta_conf->mask_hi >>
1643 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1644 for (j = 0; j < 4; j++) {
1645 if (mask & (0x1 << j))
1646 reta_conf->reta[i + j] =
1647 (uint8_t)((lut >> (8 * j)) & 0xFF);
1655 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1656 * @hw: pointer to the HW structure
1657 * @mem: pointer to mem struct to fill out
1658 * @size: size of memory requested
1659 * @alignment: what to align the allocation to
1661 enum i40e_status_code
1662 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1663 struct i40e_dma_mem *mem,
1667 static uint64_t id = 0;
1668 const struct rte_memzone *mz = NULL;
1669 char z_name[RTE_MEMZONE_NAMESIZE];
1672 return I40E_ERR_PARAM;
1675 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1676 #ifdef RTE_LIBRTE_XEN_DOM0
1677 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1680 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1683 return I40E_ERR_NO_MEMORY;
1688 #ifdef RTE_LIBRTE_XEN_DOM0
1689 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1691 mem->pa = mz->phys_addr;
1694 return I40E_SUCCESS;
1698 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1699 * @hw: pointer to the HW structure
1700 * @mem: ptr to mem struct to free
1702 enum i40e_status_code
1703 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1704 struct i40e_dma_mem *mem)
1706 if (!mem || !mem->va)
1707 return I40E_ERR_PARAM;
1712 return I40E_SUCCESS;
1716 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1717 * @hw: pointer to the HW structure
1718 * @mem: pointer to mem struct to fill out
1719 * @size: size of memory requested
1721 enum i40e_status_code
1722 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1723 struct i40e_virt_mem *mem,
1727 return I40E_ERR_PARAM;
1730 mem->va = rte_zmalloc("i40e", size, 0);
1733 return I40E_SUCCESS;
1735 return I40E_ERR_NO_MEMORY;
1739 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1740 * @hw: pointer to the HW structure
1741 * @mem: pointer to mem struct to free
1743 enum i40e_status_code
1744 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1745 struct i40e_virt_mem *mem)
1748 return I40E_ERR_PARAM;
1753 return I40E_SUCCESS;
1757 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1759 rte_spinlock_init(&sp->spinlock);
1763 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1765 rte_spinlock_lock(&sp->spinlock);
1769 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1771 rte_spinlock_unlock(&sp->spinlock);
1775 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1781 * Get the hardware capabilities, which will be parsed
1782 * and saved into struct i40e_hw.
1785 i40e_get_cap(struct i40e_hw *hw)
1787 struct i40e_aqc_list_capabilities_element_resp *buf;
1788 uint16_t len, size = 0;
1791 /* Calculate a buffer large enough to hold the response data temporarily */
1792 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1793 I40E_MAX_CAP_ELE_NUM;
1794 buf = rte_zmalloc("i40e", len, 0);
1796 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1797 return I40E_ERR_NO_MEMORY;
1800 /* Get and parse the capabilities, then save them to hw */
1801 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1802 i40e_aqc_opc_list_func_capabilities, NULL);
1803 if (ret != I40E_SUCCESS)
1804 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1806 /* Free the temporary buffer after being used */
1813 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1815 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1816 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1817 uint16_t sum_queues = 0, sum_vsis;
1819 /* First check if FW supports SR-IOV */
1820 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1821 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
1825 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
1826 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
1827 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
1828 /* Allocate queues for pf */
1829 if (hw->func_caps.rss) {
1830 pf->flags |= I40E_FLAG_RSS;
1831 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
1832 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
1833 pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
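		/*
		 * Worked example with hypothetical numbers: num_tx_qp = 96 and
		 * rss_table_entry_width = 7 (a 128-entry table) gives
		 * RTE_MIN() = 96, which i40e_prev_power_of_2() presumably
		 * rounds down to 64 LAN queue pairs.
		 */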
1836 sum_queues = pf->lan_nb_qps;
1837 /* The default VSI is not counted */
1839 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
1841 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
1842 pf->flags |= I40E_FLAG_SRIOV;
1843 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
1844 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
1845 PMD_INIT_LOG(ERR, "Config VF number %u, "
1846 "max supported %u.",
1847 dev->pci_dev->max_vfs,
1848 hw->func_caps.num_vfs);
1851 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
1852 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
1853 "max support %u queues.",
1854 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
1857 pf->vf_num = dev->pci_dev->max_vfs;
1858 sum_queues += pf->vf_nb_qps * pf->vf_num;
1859 sum_vsis += pf->vf_num;
1860 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
1861 pf->vf_num, pf->vf_nb_qps);
1865 if (hw->func_caps.vmdq) {
1866 pf->flags |= I40E_FLAG_VMDQ;
1867 pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
1868 sum_queues += pf->vmdq_nb_qps;
1870 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
1873 if (hw->func_caps.fd) {
1874 pf->flags |= I40E_FLAG_FDIR;
1875 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
1877 * Each flow director consumes one VSI and one queue, but the
1878 * exact count cannot be predicted here.
1882 if (sum_vsis > pf->max_num_vsi ||
1883 sum_queues > hw->func_caps.num_rx_qp) {
1884 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
1885 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
1886 pf->max_num_vsi, sum_vsis);
1887 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
1888 hw->func_caps.num_rx_qp, sum_queues);
1892 /* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for misc intr
1894 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
1895 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
1896 sum_vsis, hw->func_caps.num_msix_vectors);
1899 return I40E_SUCCESS;
1903 i40e_pf_get_switch_config(struct i40e_pf *pf)
1905 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1906 struct i40e_aqc_get_switch_config_resp *switch_config;
1907 struct i40e_aqc_switch_config_element_resp *element;
1908 uint16_t start_seid = 0, num_reported;
1911 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
1912 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
1913 if (!switch_config) {
1914 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1918 /* Get the switch configurations */
1919 ret = i40e_aq_get_switch_config(hw, switch_config,
1920 I40E_AQ_LARGE_BUF, &start_seid, NULL);
1921 if (ret != I40E_SUCCESS) {
1922 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
1925 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
1926 if (num_reported != 1) { /* The number should be 1 */
1927 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
1931 /* Parse the switch configuration elements */
1932 element = &(switch_config->element[0]);
1933 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
1934 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
1935 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
1937 PMD_DRV_LOG(INFO, "Unknown element type");
1940 rte_free(switch_config);
1946 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
1949 struct pool_entry *entry;
1951 if (pool == NULL || num == 0)
1954 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
1955 if (entry == NULL) {
1956 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
1960 /* Initialize the queue heap */
1961 pool->num_free = num;
1962 pool->num_alloc = 0;
1964 LIST_INIT(&pool->alloc_list);
1965 LIST_INIT(&pool->free_list);
1967 /* Initialize element */
1971 LIST_INSERT_HEAD(&pool->free_list, entry, next);
1976 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
1978 struct pool_entry *entry;
1983 LIST_FOREACH(entry, &pool->alloc_list, next) {
1984 LIST_REMOVE(entry, next);
1988 LIST_FOREACH(entry, &pool->free_list, next) {
1989 LIST_REMOVE(entry, next);
1994 pool->num_alloc = 0;
1996 LIST_INIT(&pool->alloc_list);
1997 LIST_INIT(&pool->free_list);
2001 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2004 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2005 uint32_t pool_offset;
2009 PMD_DRV_LOG(ERR, "Invalid parameter");
2013 pool_offset = base - pool->base;
2014 /* Lookup in alloc list */
2015 LIST_FOREACH(entry, &pool->alloc_list, next) {
2016 if (entry->base == pool_offset) {
2017 valid_entry = entry;
2018 LIST_REMOVE(entry, next);
2023 /* Not found, return */
2024 if (valid_entry == NULL) {
2025 PMD_DRV_LOG(ERR, "Failed to find entry");
2030 * Found it, move it to free list and try to merge.
2031 * In order to make merge easier, always sort it by qbase.
2032 * Find adjacent prev and last entries.
2035 LIST_FOREACH(entry, &pool->free_list, next) {
2036 if (entry->base > valid_entry->base) {
2044 /* Try to merge with the next one */
2046 /* Merge with next one */
2047 if (valid_entry->base + valid_entry->len == next->base) {
2048 next->base = valid_entry->base;
2049 next->len += valid_entry->len;
2050 rte_free(valid_entry);
2057 /* Merge with previous one */
2058 if (prev->base + prev->len == valid_entry->base) {
2059 prev->len += valid_entry->len;
2060 /* If it merged with the next one, remove the next node */
2062 LIST_REMOVE(valid_entry, next);
2063 rte_free(valid_entry);
2065 rte_free(valid_entry);
2071 /* No adjacent entry found to merge with, just insert */
2074 LIST_INSERT_AFTER(prev, valid_entry, next);
2075 else if (next != NULL)
2076 LIST_INSERT_BEFORE(next, valid_entry, next);
2077 else /* It's empty list, insert to head */
2078 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2081 pool->num_free += valid_entry->len;
2082 pool->num_alloc -= valid_entry->len;
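/*
 * Illustrative sketch (not part of the driver): two [base, base + len) ranges
 * are adjacent, and therefore mergeable by the logic above, exactly when one
 * ends where the other begins. The helper name below is hypothetical.
 */
static inline int
example_ranges_adjacent(uint32_t base1, uint32_t len1, uint32_t base2)
{
	/* [base1, base1 + len1) is immediately followed by [base2, ...) */
	return (base1 + len1) == base2;
}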
2088 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2091 struct pool_entry *entry, *valid_entry;
2093 if (pool == NULL || num == 0) {
2094 PMD_DRV_LOG(ERR, "Invalid parameter");
2098 if (pool->num_free < num) {
2099 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2100 num, pool->num_free);
2105 /* Look up in the free list and find the best-fit one */
2106 LIST_FOREACH(entry, &pool->free_list, next) {
2107 if (entry->len >= num) {
2109 if (entry->len == num) {
2110 valid_entry = entry;
2113 if (valid_entry == NULL || valid_entry->len > entry->len)
2114 valid_entry = entry;
2118 /* No entry found that satisfies the request, return */
2119 if (valid_entry == NULL) {
2120 PMD_DRV_LOG(ERR, "No valid entry found");
2124 * The entry has exactly the number of queues requested,
2125 * remove it from the free_list.
2127 if (valid_entry->len == num) {
2128 LIST_REMOVE(valid_entry, next);
2131 * The entry has more queues than requested;
2132 * create a new entry for the alloc_list and subtract the
2133 * allocated base and number from the free_list entry.
2135 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2136 if (entry == NULL) {
2137 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2141 entry->base = valid_entry->base;
2143 valid_entry->base += num;
2144 valid_entry->len -= num;
2145 valid_entry = entry;
2148 /* Insert it into alloc list, not sorted */
2149 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2151 pool->num_free -= valid_entry->len;
2152 pool->num_alloc += valid_entry->len;
2154 return (valid_entry->base + pool->base);
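/*
 * Usage sketch of the resource pool API above (an assumption, not taken from
 * the driver's call sites, and relying on the init/alloc/free/destroy
 * signatures as shown in this file): a pool is initialized over a contiguous
 * index range, a block is taken with best-fit allocation and later returned
 * by its base index. Error handling is reduced to minimal checks.
 */
static void
example_res_pool_usage(struct i40e_res_pool_info *pool)
{
	int base;

	if (i40e_res_pool_init(pool, 0, 64) != I40E_SUCCESS)
		return;
	base = i40e_res_pool_alloc(pool, 8);	/* best-fit block of 8 */
	if (base >= 0)
		i40e_res_pool_free(pool, base);
	i40e_res_pool_destroy(pool);
}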
2158 * bitmap_is_subset - Check whether src2 is a subset of src1
2161 bitmap_is_subset(uint8_t src1, uint8_t src2)
2163 return !((src1 ^ src2) & src2);
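/*
 * Worked example for the subset test above (illustrative only): 0x05 has bits
 * 0 and 2, both present in 0x07, so the test is non-zero; 0x09 has bit 3,
 * which 0x07 lacks, so the test is zero. The helper name is hypothetical.
 */
static inline int
example_bitmap_subset_demo(void)
{
	return bitmap_is_subset(0x07, 0x05) && !bitmap_is_subset(0x07, 0x09);
}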
2167 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2169 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2171 /* If DCB is not supported, only default TC is supported */
2172 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2173 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2177 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2178 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2179 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2183 return I40E_SUCCESS;
2187 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2188 struct i40e_vsi_vlan_pvid_info *info)
2191 struct i40e_vsi_context ctxt;
2192 uint8_t vlan_flags = 0;
2195 if (vsi == NULL || info == NULL) {
2196 PMD_DRV_LOG(ERR, "invalid parameters");
2197 return I40E_ERR_PARAM;
2201 vsi->info.pvid = info->config.pvid;
2203 * If PVID insertion is enabled, only tagged packets are
2204 * allowed to be sent out.
2206 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2207 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2210 if (info->config.reject.tagged == 0)
2211 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2213 if (info->config.reject.untagged == 0)
2214 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2216 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2217 I40E_AQ_VSI_PVLAN_MODE_MASK);
2218 vsi->info.port_vlan_flags |= vlan_flags;
2219 vsi->info.valid_sections =
2220 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2221 memset(&ctxt, 0, sizeof(ctxt));
2222 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2223 ctxt.seid = vsi->seid;
2225 hw = I40E_VSI_TO_HW(vsi);
2226 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2227 if (ret != I40E_SUCCESS)
2228 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2234 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2236 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2238 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2240 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2241 if (ret != I40E_SUCCESS)
2245 PMD_DRV_LOG(ERR, "seid not valid");
2249 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2250 tc_bw_data.tc_valid_bits = enabled_tcmap;
2251 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2252 tc_bw_data.tc_bw_credits[i] =
2253 (enabled_tcmap & (1 << i)) ? 1 : 0;
2255 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2256 if (ret != I40E_SUCCESS) {
2257 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2261 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2262 sizeof(vsi->info.qs_handle));
2263 return I40E_SUCCESS;
2267 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2268 struct i40e_aqc_vsi_properties_data *info,
2269 uint8_t enabled_tcmap)
2271 int ret, total_tc = 0, i;
2272 uint16_t qpnum_per_tc, bsf, qp_idx;
2274 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2275 if (ret != I40E_SUCCESS)
2278 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2279 if (enabled_tcmap & (1 << i))
2281 vsi->enabled_tc = enabled_tcmap;
2283 /* Number of queues per enabled TC */
2284 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2285 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2286 bsf = rte_bsf32(qpnum_per_tc);
2288 /* Adjust the queue number to actual queues that can be applied */
2289 vsi->nb_qps = qpnum_per_tc * total_tc;
2292 * Configure TC and queue mapping parameters, for enabled TC,
2293 * allocate qpnum_per_tc queues to this traffic. For disabled TC,
2294 * default queue will serve it.
2297 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2298 if (vsi->enabled_tc & (1 << i)) {
2299 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2300 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2301 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2302 qp_idx += qpnum_per_tc;
2304 info->tc_mapping[i] = 0;
2307 /* Associate queue number with VSI */
2308 if (vsi->type == I40E_VSI_SRIOV) {
2309 info->mapping_flags |=
2310 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2311 for (i = 0; i < vsi->nb_qps; i++)
2312 info->queue_mapping[i] =
2313 rte_cpu_to_le_16(vsi->base_queue + i);
2315 info->mapping_flags |=
2316 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2317 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2319 info->valid_sections =
2320 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2322 return I40E_SUCCESS;
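/*
 * Illustrative sketch of the tc_mapping packing done above: the first queue
 * index of a TC goes into the offset field and log2(queues per TC) into the
 * number field. For example, with 10 queue pairs and a single enabled TC,
 * qpnum_per_tc is rounded down to 8 and bsf becomes 3. The standalone helper
 * and its name are assumptions.
 */
static inline uint16_t
example_tc_mapping_word(uint16_t qp_idx, uint16_t bsf)
{
	return (uint16_t)((qp_idx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			  (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
}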
2326 i40e_veb_release(struct i40e_veb *veb)
2328 struct i40e_vsi *vsi;
2331 if (veb == NULL || veb->associate_vsi == NULL)
2334 if (!TAILQ_EMPTY(&veb->head)) {
2335 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2339 vsi = veb->associate_vsi;
2340 hw = I40E_VSI_TO_HW(vsi);
2342 vsi->uplink_seid = veb->uplink_seid;
2343 i40e_aq_delete_element(hw, veb->seid, NULL);
2346 return I40E_SUCCESS;
2350 static struct i40e_veb *
2351 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2353 struct i40e_veb *veb;
2357 if (NULL == pf || vsi == NULL) {
2358 PMD_DRV_LOG(ERR, "veb setup failed, "
2359 "associated VSI shouldn't null");
2362 hw = I40E_PF_TO_HW(pf);
2364 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2366 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2370 veb->associate_vsi = vsi;
2371 TAILQ_INIT(&veb->head);
2372 veb->uplink_seid = vsi->uplink_seid;
2374 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2375 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2377 if (ret != I40E_SUCCESS) {
2378 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2379 hw->aq.asq_last_status);
2383 /* get statistics index */
2384 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2385 &veb->stats_idx, NULL, NULL, NULL);
2386 if (ret != I40E_SUCCESS) {
2387 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2388 hw->aq.asq_last_status);
2392 /* Get VEB bandwidth, to be implemented */
2393 /* Now associated vsi binding to the VEB, set uplink to this VEB */
2394 vsi->uplink_seid = veb->seid;
2403 i40e_vsi_release(struct i40e_vsi *vsi)
2407 struct i40e_vsi_list *vsi_list;
2409 struct i40e_mac_filter *f;
2412 return I40E_SUCCESS;
2414 pf = I40E_VSI_TO_PF(vsi);
2415 hw = I40E_VSI_TO_HW(vsi);
2417 /* VSI has children attached, release the children first */
2419 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2420 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2422 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2424 i40e_veb_release(vsi->veb);
2427 /* Remove all macvlan filters of the VSI */
2428 i40e_vsi_remove_all_macvlan_filter(vsi);
2429 TAILQ_FOREACH(f, &vsi->mac_list, next)
2432 if (vsi->type != I40E_VSI_MAIN) {
2433 /* Remove vsi from parent's sibling list */
2434 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2435 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2436 return I40E_ERR_PARAM;
2438 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2439 &vsi->sib_vsi_list, list);
2441 /* Remove the switch element of the VSI */
2442 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2443 if (ret != I40E_SUCCESS)
2444 PMD_DRV_LOG(ERR, "Failed to delete element");
2446 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2448 if (vsi->type != I40E_VSI_SRIOV)
2449 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2452 return I40E_SUCCESS;
2456 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2458 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2459 struct i40e_aqc_remove_macvlan_element_data def_filter;
2462 if (vsi->type != I40E_VSI_MAIN)
2463 return I40E_ERR_CONFIG;
2464 memset(&def_filter, 0, sizeof(def_filter));
2465 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2467 def_filter.vlan_tag = 0;
2468 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2469 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2470 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2471 if (ret != I40E_SUCCESS) {
2472 struct i40e_mac_filter *f;
2474 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2476 /* The permanent mac needs to be added to the mac list */
2477 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2479 PMD_DRV_LOG(ERR, "failed to allocate memory");
2480 return I40E_ERR_NO_MEMORY;
2482 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2484 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2490 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2494 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2496 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2497 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2498 struct i40e_hw *hw = &vsi->adapter->hw;
2502 memset(&bw_config, 0, sizeof(bw_config));
2503 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2504 if (ret != I40E_SUCCESS) {
2505 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2506 hw->aq.asq_last_status);
2510 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2511 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2512 &ets_sla_config, NULL);
2513 if (ret != I40E_SUCCESS) {
2514 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2515 "configuration %u", hw->aq.asq_last_status);
2519 /* Don't store the info yet, just print it out */
2520 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2521 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2522 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2523 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2524 ets_sla_config.share_credits[i]);
2525 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2526 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2527 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2528 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2537 i40e_vsi_setup(struct i40e_pf *pf,
2538 enum i40e_vsi_type type,
2539 struct i40e_vsi *uplink_vsi,
2540 uint16_t user_param)
2542 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2543 struct i40e_vsi *vsi;
2545 struct i40e_vsi_context ctxt;
2546 struct ether_addr broadcast =
2547 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2549 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2550 PMD_DRV_LOG(ERR, "VSI setup failed, "
2551 "VSI link shouldn't be NULL");
2555 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2556 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2557 "uplink VSI should be NULL");
2561 /* If the uplink vsi hasn't set up a VEB yet, create one first */
2562 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2563 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2565 if (NULL == uplink_vsi->veb) {
2566 PMD_DRV_LOG(ERR, "VEB setup failed");
2571 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2573 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2576 TAILQ_INIT(&vsi->mac_list);
2578 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2579 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2580 vsi->parent_vsi = uplink_vsi;
2581 vsi->user_param = user_param;
2582 /* Allocate queues */
2583 switch (vsi->type) {
2584 case I40E_VSI_MAIN :
2585 vsi->nb_qps = pf->lan_nb_qps;
2587 case I40E_VSI_SRIOV :
2588 vsi->nb_qps = pf->vf_nb_qps;
2593 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2595 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2599 vsi->base_queue = ret;
2601 /* VF has MSIX interrupt in VF range, don't allocate here */
2602 if (type != I40E_VSI_SRIOV) {
2603 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2605 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2606 goto fail_queue_alloc;
2608 vsi->msix_intr = ret;
2612 if (type == I40E_VSI_MAIN) {
2613 /* For the main VSI, no need to add it since it's the default one */
2614 vsi->uplink_seid = pf->mac_seid;
2615 vsi->seid = pf->main_vsi_seid;
2616 /* Bind queues with specific MSIX interrupt */
2618 * Needs at least 2 interrupts: one for the misc cause, which will be
2619 * enabled from the OS side, and another for the queues, bound to the
2620 * interrupt from the device side only.
2623 /* Get default VSI parameters from hardware */
2624 memset(&ctxt, 0, sizeof(ctxt));
2625 ctxt.seid = vsi->seid;
2626 ctxt.pf_num = hw->pf_id;
2627 ctxt.uplink_seid = vsi->uplink_seid;
2629 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2630 if (ret != I40E_SUCCESS) {
2631 PMD_DRV_LOG(ERR, "Failed to get VSI params");
2632 goto fail_msix_alloc;
2634 (void)rte_memcpy(&vsi->info, &ctxt.info,
2635 sizeof(struct i40e_aqc_vsi_properties_data));
2636 vsi->vsi_id = ctxt.vsi_number;
2637 vsi->info.valid_sections = 0;
2639 /* Configure TC, enable TC0 only */
2640 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2642 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
2643 goto fail_msix_alloc;
2646 /* TC, queue mapping */
2647 memset(&ctxt, 0, sizeof(ctxt));
2648 vsi->info.valid_sections |=
2649 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2650 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2651 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2652 (void)rte_memcpy(&ctxt.info, &vsi->info,
2653 sizeof(struct i40e_aqc_vsi_properties_data));
2654 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2655 I40E_DEFAULT_TCMAP);
2656 if (ret != I40E_SUCCESS) {
2657 PMD_DRV_LOG(ERR, "Failed to configure "
2658 "TC queue mapping");
2659 goto fail_msix_alloc;
2661 ctxt.seid = vsi->seid;
2662 ctxt.pf_num = hw->pf_id;
2663 ctxt.uplink_seid = vsi->uplink_seid;
2666 /* Update VSI parameters */
2667 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2668 if (ret != I40E_SUCCESS) {
2669 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2670 goto fail_msix_alloc;
2673 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2674 sizeof(vsi->info.tc_mapping));
2675 (void)rte_memcpy(&vsi->info.queue_mapping,
2676 &ctxt.info.queue_mapping,
2677 sizeof(vsi->info.queue_mapping));
2678 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2679 vsi->info.valid_sections = 0;
2681 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2685 * Updating the default filter settings is necessary to prevent
2686 * reception of tagged packets.
2687 * Some old firmware configurations load a default macvlan
2688 * filter which accepts both tagged and untagged packets.
2689 * The updating is to use a normal filter instead if needed.
2690 * For NVM 4.2.2 or after, the updating is not needed anymore.
2691 * Firmware with the correct configuration loads the default
2692 * macvlan filter, which is expected and cannot be removed.
2694 i40e_update_default_filter_setting(vsi);
2695 } else if (type == I40E_VSI_SRIOV) {
2696 memset(&ctxt, 0, sizeof(ctxt));
2698 * For other VSIs, the uplink_seid equals the uplink VSI's
2699 * uplink_seid since they share the same VEB
2701 vsi->uplink_seid = uplink_vsi->uplink_seid;
2702 ctxt.pf_num = hw->pf_id;
2703 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2704 ctxt.uplink_seid = vsi->uplink_seid;
2705 ctxt.connection_type = 0x1;
2706 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2708 /* Configure switch ID */
2709 ctxt.info.valid_sections |=
2710 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2711 ctxt.info.switch_id =
2712 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2713 /* Configure port/vlan */
2714 ctxt.info.valid_sections |=
2715 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2716 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2717 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2718 I40E_DEFAULT_TCMAP);
2719 if (ret != I40E_SUCCESS) {
2720 PMD_DRV_LOG(ERR, "Failed to configure "
2721 "TC queue mapping");
2722 goto fail_msix_alloc;
2724 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2725 ctxt.info.valid_sections |=
2726 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2728 * Since the VSI is not created yet, only configure the parameters;
2729 * the vsi will be added below.
2733 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
2734 goto fail_msix_alloc;
2737 if (vsi->type != I40E_VSI_MAIN) {
2738 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2740 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
2741 hw->aq.asq_last_status);
2742 goto fail_msix_alloc;
2744 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2745 vsi->info.valid_sections = 0;
2746 vsi->seid = ctxt.seid;
2747 vsi->vsi_id = ctxt.vsi_number;
2748 vsi->sib_vsi_list.vsi = vsi;
2749 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2750 &vsi->sib_vsi_list, list);
2753 /* MAC/VLAN configuration */
2754 ret = i40e_vsi_add_mac(vsi, &broadcast);
2755 if (ret != I40E_SUCCESS) {
2756 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
2757 goto fail_msix_alloc;
2760 /* Get VSI BW information */
2761 i40e_vsi_dump_bw_config(vsi);
2764 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
2766 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
2772 /* Configure vlan stripping on or off */
2774 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2776 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2777 struct i40e_vsi_context ctxt;
2779 int ret = I40E_SUCCESS;
2781 /* Check whether it is already on or off */
2782 if (vsi->info.valid_sections &
2783 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2785 if ((vsi->info.port_vlan_flags &
2786 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2787 return 0; /* already on */
2789 if ((vsi->info.port_vlan_flags &
2790 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2791 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2792 return 0; /* already off */
2797 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2799 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2800 vsi->info.valid_sections =
2801 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2802 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2803 vsi->info.port_vlan_flags |= vlan_flags;
2804 ctxt.seid = vsi->seid;
2805 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2806 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2808 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2809 on ? "enable" : "disable");
2815 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2817 struct rte_eth_dev_data *data = dev->data;
2820 /* Apply vlan offload setting */
2821 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2823 /* Apply double-vlan setting, not implemented yet */
2825 /* Apply pvid setting */
2826 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2827 data->dev_conf.txmode.hw_vlan_insert_pvid);
2829 PMD_DRV_LOG(INFO, "Failed to update VSI params");
2835 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2837 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2839 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2843 i40e_update_flow_control(struct i40e_hw *hw)
2845 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2846 struct i40e_link_status link_status;
2847 uint32_t rxfc = 0, txfc = 0, reg;
2851 memset(&link_status, 0, sizeof(link_status));
2852 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2853 if (ret != I40E_SUCCESS) {
2854 PMD_DRV_LOG(ERR, "Failed to get link status information");
2855 goto write_reg; /* Disable flow control */
2858 an_info = hw->phy.link_info.an_info;
2859 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2860 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
2861 ret = I40E_ERR_NOT_READY;
2862 goto write_reg; /* Disable flow control */
2865 * If link auto negotiation is enabled, flow control needs to
2866 * be configured according to it
2868 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2869 case I40E_LINK_PAUSE_RXTX:
2872 hw->fc.current_mode = I40E_FC_FULL;
2874 case I40E_AQ_LINK_PAUSE_RX:
2876 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2878 case I40E_AQ_LINK_PAUSE_TX:
2880 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2883 hw->fc.current_mode = I40E_FC_NONE;
2888 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2889 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2890 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2891 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2892 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2893 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2900 i40e_pf_setup(struct i40e_pf *pf)
2902 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2903 struct i40e_filter_control_settings settings;
2904 struct rte_eth_dev_data *dev_data = pf->dev_data;
2905 struct i40e_vsi *vsi;
2908 /* Clear all stats counters */
2909 pf->offset_loaded = FALSE;
2910 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2911 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2913 ret = i40e_pf_get_switch_config(pf);
2914 if (ret != I40E_SUCCESS) {
2915 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2920 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2922 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2923 return I40E_ERR_NOT_READY;
2926 dev_data->nb_rx_queues = vsi->nb_qps;
2927 dev_data->nb_tx_queues = vsi->nb_qps;
2929 /* Configure filter control */
2930 memset(&settings, 0, sizeof(settings));
2931 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2932 /* Enable ethtype and macvlan filters */
2933 settings.enable_ethtype = TRUE;
2934 settings.enable_macvlan = TRUE;
2935 ret = i40e_set_filter_control(hw, &settings);
2937 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2940 /* Update flow control according to the auto negotiation */
2941 i40e_update_flow_control(hw);
2943 return I40E_SUCCESS;
2947 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2953 * Set or clear TX Queue Disable flags,
2954 * which is required by hardware.
2956 i40e_pre_tx_queue_cfg(hw, q_idx, on);
2957 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
2959 /* Wait until the request is finished */
2960 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2961 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2962 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2963 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2964 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2970 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2971 return I40E_SUCCESS; /* already on, skip next steps */
2973 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
2974 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2976 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2977 return I40E_SUCCESS; /* already off, skip next steps */
2978 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2980 /* Write the register */
2981 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2982 /* Check the result */
2983 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2984 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2985 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2987 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2988 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2991 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2992 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2996 /* Check whether it timed out */
2997 if (j >= I40E_CHK_Q_ENA_COUNT) {
2998 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
2999 (on ? "enable" : "disable"), q_idx);
3000 return I40E_ERR_TIMEOUT;
3003 return I40E_SUCCESS;
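/*
 * Sketch of the enable/disable handshake used above (illustrative): software
 * drives the QENA_REQ bit and polls until the QENA_STAT bit reports the same
 * state. The helper below only evaluates one polled register value; its name
 * is hypothetical.
 */
static inline bool
example_qena_tx_settled(uint32_t reg, bool on)
{
	bool stat = !!(reg & I40E_QTX_ENA_QENA_STAT_MASK);

	return stat == on;
}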
3006 /* Switch on or off the tx queues */
3008 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3010 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3011 struct i40e_tx_queue *txq;
3012 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3016 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3017 txq = dev_data->tx_queues[i];
3018 /* Don't operate the queue if it's not configured or
3019 * if it should be started only per queue */
3020 if (!txq->q_set || (on && txq->tx_deferred_start))
3023 ret = i40e_dev_tx_queue_start(dev, i);
3025 ret = i40e_dev_tx_queue_stop(dev, i);
3026 if (ret != I40E_SUCCESS)
3030 return I40E_SUCCESS;
3034 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3039 /* Wait until the request is finished */
3040 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3041 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3042 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3043 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3044 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3049 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3050 return I40E_SUCCESS; /* Already on, skip next steps */
3051 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3053 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3054 return I40E_SUCCESS; /* Already off, skip next steps */
3055 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3058 /* Write the register */
3059 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3060 /* Check the result */
3061 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3062 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3063 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3065 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3066 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3069 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3070 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3075 /* Check whether it timed out */
3076 if (j >= I40E_CHK_Q_ENA_COUNT) {
3077 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3078 (on ? "enable" : "disable"), q_idx);
3079 return I40E_ERR_TIMEOUT;
3082 return I40E_SUCCESS;
3084 /* Switch on or off the rx queues */
3086 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3088 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3089 struct i40e_rx_queue *rxq;
3090 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3094 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3095 rxq = dev_data->rx_queues[i];
3096 /* Don't operate the queue if it's not configured or
3097 * if it should be started only per queue */
3098 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3101 ret = i40e_dev_rx_queue_start(dev, i);
3103 ret = i40e_dev_rx_queue_stop(dev, i);
3104 if (ret != I40E_SUCCESS)
3108 return I40E_SUCCESS;
3111 /* Switch on or off all the rx/tx queues */
3113 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3118 /* enable rx queues before enabling tx queues */
3119 ret = i40e_vsi_switch_rx_queues(vsi, on);
3121 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3124 ret = i40e_vsi_switch_tx_queues(vsi, on);
3126 /* Stop tx queues before stopping rx queues */
3127 ret = i40e_vsi_switch_tx_queues(vsi, on);
3129 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3132 ret = i40e_vsi_switch_rx_queues(vsi, on);
3138 /* Initialize VSI for TX */
3140 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3142 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3143 struct rte_eth_dev_data *data = pf->dev_data;
3145 uint32_t ret = I40E_SUCCESS;
3147 for (i = 0; i < data->nb_tx_queues; i++) {
3148 ret = i40e_tx_queue_init(data->tx_queues[i]);
3149 if (ret != I40E_SUCCESS)
3156 /* Initialize VSI for RX */
3158 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3160 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3161 struct rte_eth_dev_data *data = pf->dev_data;
3162 int ret = I40E_SUCCESS;
3165 i40e_pf_config_mq_rx(pf);
3166 for (i = 0; i < data->nb_rx_queues; i++) {
3167 ret = i40e_rx_queue_init(data->rx_queues[i]);
3168 if (ret != I40E_SUCCESS) {
3169 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3178 /* Initialize VSI */
3180 i40e_vsi_init(struct i40e_vsi *vsi)
3184 err = i40e_vsi_tx_init(vsi);
3186 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3189 err = i40e_vsi_rx_init(vsi);
3191 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3199 i40e_stat_update_32(struct i40e_hw *hw,
3207 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3211 if (new_data >= *offset)
3212 *stat = (uint64_t)(new_data - *offset);
3214 *stat = (uint64_t)((new_data +
3215 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3219 i40e_stat_update_48(struct i40e_hw *hw,
3228 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3229 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3230 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3235 if (new_data >= *offset)
3236 *stat = new_data - *offset;
3238 *stat = (uint64_t)((new_data +
3239 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3241 *stat &= I40E_48_BIT_MASK;
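/*
 * Illustrative sketch of the wrap-safe delta computed above: when the new
 * 48-bit reading is below the stored offset the counter has wrapped, so one
 * full 2^48 period is added before subtracting. The helper name is
 * hypothetical.
 */
static inline uint64_t
example_stat_delta_48(uint64_t new_data, uint64_t offset)
{
	uint64_t delta;

	if (new_data >= offset)
		delta = new_data - offset;
	else
		delta = new_data + ((uint64_t)1 << I40E_48_BIT_SHIFT) - offset;

	return delta & I40E_48_BIT_MASK;
}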
3246 i40e_pf_disable_irq0(struct i40e_hw *hw)
3248 /* Disable all interrupt types */
3249 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3250 I40E_WRITE_FLUSH(hw);
3255 i40e_pf_enable_irq0(struct i40e_hw *hw)
3257 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3258 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3259 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3260 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3261 I40E_WRITE_FLUSH(hw);
3265 i40e_pf_config_irq0(struct i40e_hw *hw)
3269 /* read pending request and disable first */
3270 i40e_pf_disable_irq0(hw);
3272 * Enable all interrupt error options to detect possible errors,
3273 * other informative interrupts are ignored
3275 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3276 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3277 I40E_PFINT_ICR0_ENA_GRST_MASK |
3278 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3279 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3280 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3281 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3282 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3284 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3285 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3286 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3288 /* Link no queues with irq0 */
3289 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3290 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3294 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3296 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3297 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3300 uint32_t index, offset, val;
3305 * Try to find which VF triggered a reset; use the absolute VF id to access
3306 * it, since the reg is a global register.
3308 for (i = 0; i < pf->vf_num; i++) {
3309 abs_vf_id = hw->func_caps.vf_base_id + i;
3310 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3311 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3312 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3313 /* VFR event occurred */
3314 if (val & (0x1 << offset)) {
3317 /* Clear the event first */
3318 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3320 PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3322 * Only notify a VF reset event occured,
3323 * don't trigger another SW reset
3325 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3326 if (ret != I40E_SUCCESS)
3327 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3333 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3335 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3336 struct i40e_arq_event_info info;
3337 uint16_t pending, opcode;
3340 info.msg_size = I40E_AQ_BUF_SZ;
3341 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3342 if (!info.msg_buf) {
3343 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3349 ret = i40e_clean_arq_element(hw, &info, &pending);
3351 if (ret != I40E_SUCCESS) {
3352 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3353 "aq_err: %u", hw->aq.asq_last_status);
3356 opcode = rte_le_to_cpu_16(info.desc.opcode);
3359 case i40e_aqc_opc_send_msg_to_pf:
3360 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3361 i40e_pf_host_handle_vf_msg(dev,
3362 rte_le_to_cpu_16(info.desc.retval),
3363 rte_le_to_cpu_32(info.desc.cookie_high),
3364 rte_le_to_cpu_32(info.desc.cookie_low),
3369 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3373 /* Reset the buffer after processing one */
3374 info.msg_size = I40E_AQ_BUF_SZ;
3376 rte_free(info.msg_buf);
3380 * Interrupt handler triggered by NIC for handling
3381 * specific interrupt.
3384 * Pointer to interrupt handle.
3386 * The address of the parameter (struct rte_eth_dev *) registered before.
3392 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3395 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3396 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3397 uint32_t cause, enable;
3399 i40e_pf_disable_irq0(hw);
3401 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3402 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3404 /* Shared IRQ case, return */
3405 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3406 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3407 "no INT event to process", hw->pf_id);
3411 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3412 PMD_DRV_LOG(INFO, "INT:Link status changed");
3413 i40e_dev_link_update(dev, 0);
3416 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3417 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");
3419 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3420 PMD_DRV_LOG(INFO, "INT:Malicious programming detected");
3422 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3423 PMD_DRV_LOG(INFO, "INT:Global Resets Requested");
3425 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3426 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured");
3428 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3429 PMD_DRV_LOG(INFO, "INT:HMC error occured");
3431 /* Add processing func to deal with VF reset event */
3432 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3433 PMD_DRV_LOG(INFO, "INT:VF reset detected");
3434 i40e_dev_handle_vfr_event(dev);
3436 /* Find admin queue event */
3437 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3438 PMD_DRV_LOG(INFO, "INT:ADMINQ event");
3439 i40e_dev_handle_aq_msg(dev);
3443 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3444 /* Re-enable interrupt from device side */
3445 i40e_pf_enable_irq0(hw);
3446 /* Re-enable interrupt from host side */
3447 rte_intr_enable(&(dev->pci_dev->intr_handle));
3451 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3452 struct i40e_macvlan_filter *filter,
3455 int ele_num, ele_buff_size;
3456 int num, actual_num, i;
3457 int ret = I40E_SUCCESS;
3458 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3459 struct i40e_aqc_add_macvlan_element_data *req_list;
3461 if (filter == NULL || total == 0)
3462 return I40E_ERR_PARAM;
3463 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3464 ele_buff_size = hw->aq.asq_buf_size;
3466 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3467 if (req_list == NULL) {
3468 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3469 return I40E_ERR_NO_MEMORY;
3474 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3475 memset(req_list, 0, ele_buff_size);
3477 for (i = 0; i < actual_num; i++) {
3478 (void)rte_memcpy(req_list[i].mac_addr,
3479 &filter[num + i].macaddr, ETH_ADDR_LEN);
3480 req_list[i].vlan_tag =
3481 rte_cpu_to_le_16(filter[num + i].vlan_id);
3482 req_list[i].flags = rte_cpu_to_le_16(\
3483 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3484 req_list[i].queue_number = 0;
3487 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3489 if (ret != I40E_SUCCESS) {
3490 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3494 } while (num < total);
3502 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3503 struct i40e_macvlan_filter *filter,
3506 int ele_num, ele_buff_size;
3507 int num, actual_num, i;
3508 int ret = I40E_SUCCESS;
3509 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3510 struct i40e_aqc_remove_macvlan_element_data *req_list;
3512 if (filter == NULL || total == 0)
3513 return I40E_ERR_PARAM;
3515 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3516 ele_buff_size = hw->aq.asq_buf_size;
3518 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3519 if (req_list == NULL) {
3520 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3521 return I40E_ERR_NO_MEMORY;
3526 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3527 memset(req_list, 0, ele_buff_size);
3529 for (i = 0; i < actual_num; i++) {
3530 (void)rte_memcpy(req_list[i].mac_addr,
3531 &filter[num + i].macaddr, ETH_ADDR_LEN);
3532 req_list[i].vlan_tag =
3533 rte_cpu_to_le_16(filter[num + i].vlan_id);
3534 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3537 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3539 if (ret != I40E_SUCCESS) {
3540 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
3544 } while (num < total);
3551 /* Find a specific MAC filter */
3552 static struct i40e_mac_filter *
3553 i40e_find_mac_filter(struct i40e_vsi *vsi,
3554 struct ether_addr *macaddr)
3556 struct i40e_mac_filter *f;
3558 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3559 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3567 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3570 uint32_t vid_idx, vid_bit;
3572 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3573 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3575 if (vsi->vfta[vid_idx] & vid_bit)
3582 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3583 uint16_t vlan_id, bool on)
3585 uint32_t vid_idx, vid_bit;
3587 #define UINT32_BIT_MASK 0x1F
3588 #define VALID_VLAN_BIT_MASK 0xFFF
3589 /* VFTA is an array of 32-bit elements, each holding 32 vlan bits. Find the
3590 * element first, then find the bit within it
3592 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3594 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3597 vsi->vfta[vid_idx] |= vid_bit;
3599 vsi->vfta[vid_idx] &= ~vid_bit;
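/*
 * Illustrative sketch of the VFTA addressing used above: a VLAN ID is split
 * into an element index (bits 5..11) and a bit position inside the 32-bit
 * element (bits 0..4). For VLAN 100 that gives index 3 and bit (1 << 4).
 * The helper name is hypothetical.
 */
static inline void
example_vfta_position(uint16_t vlan_id, uint32_t *idx, uint32_t *bit)
{
	*idx = (uint32_t)((vlan_id & 0xFFF) >> 5);
	*bit = (uint32_t)(1 << (vlan_id & 0x1F));
}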
3603 * Find all vlan options for a specific mac addr,
3604 * returning the vlans actually found.
3607 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3608 struct i40e_macvlan_filter *mv_f,
3609 int num, struct ether_addr *addr)
3615 * i40e_find_vlan_filter is not used here, to decrease the loop time,
3616 * although it makes the code look more complex.
3618 if (num < vsi->vlan_num)
3619 return I40E_ERR_PARAM;
3622 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3624 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3625 if (vsi->vfta[j] & (1 << k)) {
3627 PMD_DRV_LOG(ERR, "vlan number "
3629 return I40E_ERR_PARAM;
3631 (void)rte_memcpy(&mv_f[i].macaddr,
3632 addr, ETH_ADDR_LEN);
3634 j * I40E_UINT32_BIT_SIZE + k;
3640 return I40E_SUCCESS;
3644 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3645 struct i40e_macvlan_filter *mv_f,
3650 struct i40e_mac_filter *f;
3652 if (num < vsi->mac_num)
3653 return I40E_ERR_PARAM;
3655 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3657 PMD_DRV_LOG(ERR, "buffer number not match");
3658 return I40E_ERR_PARAM;
3660 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3661 mv_f[i].vlan_id = vlan;
3665 return I40E_SUCCESS;
3669 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3672 struct i40e_mac_filter *f;
3673 struct i40e_macvlan_filter *mv_f;
3674 int ret = I40E_SUCCESS;
3676 if (vsi == NULL || vsi->mac_num == 0)
3677 return I40E_ERR_PARAM;
3679 /* Case that no vlan is set */
3680 if (vsi->vlan_num == 0)
3683 num = vsi->mac_num * vsi->vlan_num;
3685 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3687 PMD_DRV_LOG(ERR, "failed to allocate memory");
3688 return I40E_ERR_NO_MEMORY;
3692 if (vsi->vlan_num == 0) {
3693 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3694 (void)rte_memcpy(&mv_f[i].macaddr,
3695 &f->macaddr, ETH_ADDR_LEN);
3696 mv_f[i].vlan_id = 0;
3700 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3701 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3702 vsi->vlan_num, &f->macaddr);
3703 if (ret != I40E_SUCCESS)
3709 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3717 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3719 struct i40e_macvlan_filter *mv_f;
3721 int ret = I40E_SUCCESS;
3723 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3724 return I40E_ERR_PARAM;
3726 /* If it's already set, just return */
3727 if (i40e_find_vlan_filter(vsi,vlan))
3728 return I40E_SUCCESS;
3730 mac_num = vsi->mac_num;
3733 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3734 return I40E_ERR_PARAM;
3737 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3740 PMD_DRV_LOG(ERR, "failed to allocate memory");
3741 return I40E_ERR_NO_MEMORY;
3744 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3746 if (ret != I40E_SUCCESS)
3749 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3751 if (ret != I40E_SUCCESS)
3754 i40e_set_vlan_filter(vsi, vlan, 1);
3764 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3766 struct i40e_macvlan_filter *mv_f;
3768 int ret = I40E_SUCCESS;
3771 * Vlan 0 is the generic filter for untagged packets
3772 * and can't be removed.
3774 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3775 return I40E_ERR_PARAM;
3777 /* If it can't be found, just return */
3778 if (!i40e_find_vlan_filter(vsi, vlan))
3779 return I40E_ERR_PARAM;
3781 mac_num = vsi->mac_num;
3784 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3785 return I40E_ERR_PARAM;
3788 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3791 PMD_DRV_LOG(ERR, "failed to allocate memory");
3792 return I40E_ERR_NO_MEMORY;
3795 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3797 if (ret != I40E_SUCCESS)
3800 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3802 if (ret != I40E_SUCCESS)
3805 /* This is the last vlan to remove; replace all mac filters with vlan 0 */
3806 if (vsi->vlan_num == 1) {
3807 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3808 if (ret != I40E_SUCCESS)
3811 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3812 if (ret != I40E_SUCCESS)
3816 i40e_set_vlan_filter(vsi, vlan, 0);
3826 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3828 struct i40e_mac_filter *f;
3829 struct i40e_macvlan_filter *mv_f;
3831 int ret = I40E_SUCCESS;
3833 /* If it's an add and we've already configured it, return */
3834 f = i40e_find_mac_filter(vsi, addr);
3836 return I40E_SUCCESS;
3839 * If vlan_num is 0, this is the first time to add a mac;
3840 * set the mask for vlan_id 0.
3842 if (vsi->vlan_num == 0) {
3843 i40e_set_vlan_filter(vsi, 0, 1);
3847 vlan_num = vsi->vlan_num;
3849 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3851 PMD_DRV_LOG(ERR, "failed to allocate memory");
3852 return I40E_ERR_NO_MEMORY;
3855 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3856 if (ret != I40E_SUCCESS)
3859 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3860 if (ret != I40E_SUCCESS)
3863 /* Add the mac addr to the mac list */
3864 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3866 PMD_DRV_LOG(ERR, "failed to allocate memory");
3867 ret = I40E_ERR_NO_MEMORY;
3870 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3871 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3882 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3884 struct i40e_mac_filter *f;
3885 struct i40e_macvlan_filter *mv_f;
3887 int ret = I40E_SUCCESS;
3889 /* Can't find it, return an error */
3890 f = i40e_find_mac_filter(vsi, addr);
3892 return I40E_ERR_PARAM;
3894 vlan_num = vsi->vlan_num;
3895 if (vlan_num == 0) {
3896 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
3897 return I40E_ERR_PARAM;
3899 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3901 PMD_DRV_LOG(ERR, "failed to allocate memory");
3902 return I40E_ERR_NO_MEMORY;
3905 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3906 if (ret != I40E_SUCCESS)
3909 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3910 if (ret != I40E_SUCCESS)
3913 /* Remove the mac addr from the mac list */
3914 TAILQ_REMOVE(&vsi->mac_list, f, next);
3924 /* Configure hash enable flags for RSS */
3926 i40e_config_hena(uint64_t flags)
3933 if (flags & ETH_RSS_NONF_IPV4_UDP)
3934 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3935 if (flags & ETH_RSS_NONF_IPV4_TCP)
3936 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3937 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3938 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3939 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3940 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3941 if (flags & ETH_RSS_FRAG_IPV4)
3942 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3943 if (flags & ETH_RSS_NONF_IPV6_UDP)
3944 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3945 if (flags & ETH_RSS_NONF_IPV6_TCP)
3946 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3947 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3948 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3949 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3950 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3951 if (flags & ETH_RSS_FRAG_IPV6)
3952 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3953 if (flags & ETH_RSS_L2_PAYLOAD)
3954 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3959 /* Parse the hash enable flags */
3961 i40e_parse_hena(uint64_t flags)
3963 uint64_t rss_hf = 0;
3968 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3969 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3970 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3971 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3972 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3973 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3974 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3975 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3976 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3977 rss_hf |= ETH_RSS_FRAG_IPV4;
3978 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3979 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3980 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3981 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3982 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3983 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3984 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3985 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3986 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3987 rss_hf |= ETH_RSS_FRAG_IPV6;
3988 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3989 rss_hf |= ETH_RSS_L2_PAYLOAD;
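/*
 * Illustrative round-trip sketch (not part of the driver): i40e_config_hena()
 * and i40e_parse_hena() are inverse mappings between ETH_RSS_* flags and
 * PCTYPE bit positions, so converting a supported flag set there and back
 * should give the flags back. The helper name is hypothetical.
 */
static inline int
example_hena_round_trip(uint64_t rss_hf)
{
	uint64_t hena = i40e_config_hena(rss_hf & I40E_RSS_OFFLOAD_ALL);

	return i40e_parse_hena(hena) == (rss_hf & I40E_RSS_OFFLOAD_ALL);
}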
3996 i40e_pf_disable_rss(struct i40e_pf *pf)
3998 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4001 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4002 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4003 hena &= ~I40E_RSS_HENA_ALL;
4004 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4005 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4006 I40E_WRITE_FLUSH(hw);
4010 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4013 uint8_t hash_key_len;
4018 hash_key = (uint32_t *)(rss_conf->rss_key);
4019 hash_key_len = rss_conf->rss_key_len;
4020 if (hash_key != NULL && hash_key_len >=
4021 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4022 /* Fill in RSS hash key */
4023 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4024 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4027 rss_hf = rss_conf->rss_hf;
4028 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4029 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4030 hena &= ~I40E_RSS_HENA_ALL;
4031 hena |= i40e_config_hena(rss_hf);
4032 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4033 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4034 I40E_WRITE_FLUSH(hw);
4040 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4041 struct rte_eth_rss_conf *rss_conf)
4043 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4044 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4047 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4048 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4049 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4050 if (rss_hf != 0) /* Enable RSS */
4052 return 0; /* Nothing to do */
4055 if (rss_hf == 0) /* Disable RSS */
4058 return i40e_hw_rss_hash_set(hw, rss_conf);
4062 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4063 struct rte_eth_rss_conf *rss_conf)
4065 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4066 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4070 if (hash_key != NULL) {
4071 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4072 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4073 rss_conf->rss_key_len = i * sizeof(uint32_t);
4075 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4076 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4077 rss_conf->rss_hf = i40e_parse_hena(hena);
4084 i40e_pf_config_rss(struct i40e_pf *pf)
4086 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4087 struct rte_eth_rss_conf rss_conf;
4088 uint32_t i, lut = 0;
4089 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
4091 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
4094 lut = (lut << 8) | (j & ((0x1 <<
4095 hw->func_caps.rss_table_entry_width) - 1));
4097 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
4100 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
4101 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
4102 i40e_pf_disable_rss(pf);
4105 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
4106 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4107 /* Generate a random default hash key */
4108 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4109 rss_key_default[i] = (uint32_t)rte_rand();
4110 rss_conf.rss_key = (uint8_t *)rss_key_default;
4111 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
4115 return i40e_hw_rss_hash_set(hw, &rss_conf);
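/*
 * Illustrative sketch of the LUT fill above: each 32-bit HLUT register packs
 * four one-byte entries, oldest entry in the highest byte, and a register is
 * written once every fourth index after the queue index has been masked to
 * rss_table_entry_width bits. The helper name is hypothetical.
 */
static inline uint32_t
example_pack_lut_word(const uint8_t entries[4])
{
	return ((uint32_t)entries[0] << 24) | ((uint32_t)entries[1] << 16) |
	       ((uint32_t)entries[2] << 8) | (uint32_t)entries[3];
}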
4119 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4121 if (!pf->dev_data->sriov.active) {
4122 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
4124 i40e_pf_config_rss(pf);
4127 i40e_pf_disable_rss(pf);