4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
52 #include "i40e_logs.h"
53 #include "i40e/i40e_register_x710_int.h"
54 #include "i40e/i40e_prototype.h"
55 #include "i40e/i40e_adminq_cmd.h"
56 #include "i40e/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
61 /* Maximum number of MAC addresses */
62 #define I40E_NUM_MACADDR_MAX 64
63 #define I40E_CLEAR_PXE_WAIT_MS 200
65 /* Maximum number of capability elements */
66 #define I40E_MAX_CAP_ELE_NUM 128
68 /* Wait count and interval */
69 #define I40E_CHK_Q_ENA_COUNT 1000
70 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
72 /* Maximum number of VSIs */
73 #define I40E_MAX_NUM_VSIS (384UL)
75 /* Bit shift and mask */
76 #define I40E_16_BIT_SHIFT 16
77 #define I40E_16_BIT_MASK 0xFFFF
78 #define I40E_32_BIT_SHIFT 32
79 #define I40E_32_BIT_MASK 0xFFFFFFFF
80 #define I40E_48_BIT_SHIFT 48
81 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
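/*
 * Illustrative note (not from the original sources): the 48-bit port and
 * VSI counters are split across a 32-bit low register and a 16-bit high
 * register, so a raw value is assembled roughly as
 *   val = lo | (((uint64_t)(hi & I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT);
 * and wrap-around deltas are truncated with I40E_48_BIT_MASK
 * (see i40e_stat_update_48() below).
 */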
83 /* Default queue interrupt throttling time in microseconds */
84 #define I40E_ITR_INDEX_DEFAULT 0
85 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
86 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
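/*
 * Note: these intervals are expressed in microseconds, while the hardware
 * ITR registers are programmed in 2 us units, so the 32 us default is
 * written as 16 (see i40e_calc_itr_interval()).
 */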
88 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
90 #define I40E_RSS_OFFLOAD_ALL ( \
91 ETH_RSS_NONF_IPV4_UDP | \
92 ETH_RSS_NONF_IPV4_TCP | \
93 ETH_RSS_NONF_IPV4_SCTP | \
94 ETH_RSS_NONF_IPV4_OTHER | \
96 ETH_RSS_NONF_IPV6_UDP | \
97 ETH_RSS_NONF_IPV6_TCP | \
98 ETH_RSS_NONF_IPV6_SCTP | \
99 ETH_RSS_NONF_IPV6_OTHER | \
100 ETH_RSS_FRAG_IPV6 | \
103 /* All bits of RSS hash enable */
104 #define I40E_RSS_HENA_ALL ( \
105 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
106 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
107 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
108 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
109 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
110 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
111 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
112 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
113 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
114 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
115 (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
116 (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
117 (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
118 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
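/*
 * Each bit in the 64-bit HENA value enables RSS hashing for one packet
 * classifier type (PCTYPE); the macro above simply ORs together every
 * PCTYPE this driver supports so the full set can be written to the
 * PFQF_HENA register pair in one go.
 */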
120 static int eth_i40e_dev_init(\
121 __attribute__((unused)) struct eth_driver *eth_drv,
122 struct rte_eth_dev *eth_dev);
123 static int i40e_dev_configure(struct rte_eth_dev *dev);
124 static int i40e_dev_start(struct rte_eth_dev *dev);
125 static void i40e_dev_stop(struct rte_eth_dev *dev);
126 static void i40e_dev_close(struct rte_eth_dev *dev);
127 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
128 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
129 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
130 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
131 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
132 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
133 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
134 struct rte_eth_stats *stats);
135 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
136 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
140 static void i40e_dev_info_get(struct rte_eth_dev *dev,
141 struct rte_eth_dev_info *dev_info);
142 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
145 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
146 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
147 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
150 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
151 static int i40e_dev_led_on(struct rte_eth_dev *dev);
152 static int i40e_dev_led_off(struct rte_eth_dev *dev);
153 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
154 struct rte_eth_fc_conf *fc_conf);
155 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
156 struct rte_eth_pfc_conf *pfc_conf);
157 static void i40e_macaddr_add(struct rte_eth_dev *dev,
158 struct ether_addr *mac_addr,
161 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
162 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
163 struct rte_eth_rss_reta *reta_conf);
164 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
165 struct rte_eth_rss_reta *reta_conf);
167 static int i40e_get_cap(struct i40e_hw *hw);
168 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
169 static int i40e_pf_setup(struct i40e_pf *pf);
170 static int i40e_vsi_init(struct i40e_vsi *vsi);
171 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
172 bool offset_loaded, uint64_t *offset, uint64_t *stat);
173 static void i40e_stat_update_48(struct i40e_hw *hw,
179 static void i40e_pf_config_irq0(struct i40e_hw *hw);
180 static void i40e_dev_interrupt_handler(
181 __rte_unused struct rte_intr_handle *handle, void *param);
182 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
183 uint32_t base, uint32_t num);
184 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
185 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
187 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
189 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
190 static int i40e_veb_release(struct i40e_veb *veb);
191 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
192 struct i40e_vsi *vsi);
193 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
194 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
195 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
196 struct i40e_macvlan_filter *mv_f,
198 struct ether_addr *addr);
199 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
200 struct i40e_macvlan_filter *mv_f,
203 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
204 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
205 struct rte_eth_rss_conf *rss_conf);
206 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
207 struct rte_eth_rss_conf *rss_conf);
209 /* Default hash key buffer for RSS */
210 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
212 static struct rte_pci_id pci_id_i40e_map[] = {
213 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
214 #include "rte_pci_dev_ids.h"
215 { .vendor_id = 0, /* sentinel */ },
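/*
 * The device ID table is populated by re-including rte_pci_dev_ids.h with
 * RTE_PCI_DEV_ID_DECL_I40E defined above; the zeroed entry terminates the
 * table for the PCI probing code.
 */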
218 static struct eth_dev_ops i40e_eth_dev_ops = {
219 .dev_configure = i40e_dev_configure,
220 .dev_start = i40e_dev_start,
221 .dev_stop = i40e_dev_stop,
222 .dev_close = i40e_dev_close,
223 .promiscuous_enable = i40e_dev_promiscuous_enable,
224 .promiscuous_disable = i40e_dev_promiscuous_disable,
225 .allmulticast_enable = i40e_dev_allmulticast_enable,
226 .allmulticast_disable = i40e_dev_allmulticast_disable,
227 .dev_set_link_up = i40e_dev_set_link_up,
228 .dev_set_link_down = i40e_dev_set_link_down,
229 .link_update = i40e_dev_link_update,
230 .stats_get = i40e_dev_stats_get,
231 .stats_reset = i40e_dev_stats_reset,
232 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
233 .dev_infos_get = i40e_dev_info_get,
234 .vlan_filter_set = i40e_vlan_filter_set,
235 .vlan_tpid_set = i40e_vlan_tpid_set,
236 .vlan_offload_set = i40e_vlan_offload_set,
237 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
238 .vlan_pvid_set = i40e_vlan_pvid_set,
239 .rx_queue_start = i40e_dev_rx_queue_start,
240 .rx_queue_stop = i40e_dev_rx_queue_stop,
241 .tx_queue_start = i40e_dev_tx_queue_start,
242 .tx_queue_stop = i40e_dev_tx_queue_stop,
243 .rx_queue_setup = i40e_dev_rx_queue_setup,
244 .rx_queue_release = i40e_dev_rx_queue_release,
245 .rx_queue_count = i40e_dev_rx_queue_count,
246 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
247 .tx_queue_setup = i40e_dev_tx_queue_setup,
248 .tx_queue_release = i40e_dev_tx_queue_release,
249 .dev_led_on = i40e_dev_led_on,
250 .dev_led_off = i40e_dev_led_off,
251 .flow_ctrl_set = i40e_flow_ctrl_set,
252 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
253 .mac_addr_add = i40e_macaddr_add,
254 .mac_addr_remove = i40e_macaddr_remove,
255 .reta_update = i40e_dev_rss_reta_update,
256 .reta_query = i40e_dev_rss_reta_query,
257 .rss_hash_update = i40e_dev_rss_hash_update,
258 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
261 static struct eth_driver rte_i40e_pmd = {
263 .name = "rte_i40e_pmd",
264 .id_table = pci_id_i40e_map,
265 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
267 .eth_dev_init = eth_i40e_dev_init,
268 .dev_private_size = sizeof(struct i40e_adapter),
272 i40e_prev_power_of_2(int n)
290 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
291 struct rte_eth_link *link)
293 struct rte_eth_link *dst = link;
294 struct rte_eth_link *src = &(dev->data->dev_link);
296 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
297 *(uint64_t *)src) == 0)
304 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
305 struct rte_eth_link *link)
307 struct rte_eth_link *dst = &(dev->data->dev_link);
308 struct rte_eth_link *src = link;
310 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
311 *(uint64_t *)src) == 0)
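/*
 * Note: both helpers above rely on struct rte_eth_link fitting in 64 bits;
 * rte_atomic64_cmpset() then copies the whole struct in a single atomic
 * operation and reports failure if the destination changed concurrently.
 */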
318 * Driver initialization routine.
319 * Invoked once at EAL init time.
320 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
323 rte_i40e_pmd_init(const char *name __rte_unused,
324 const char *params __rte_unused)
326 PMD_INIT_FUNC_TRACE();
327 rte_eth_driver_register(&rte_i40e_pmd);
332 static struct rte_driver rte_i40e_driver = {
334 .init = rte_i40e_pmd_init,
337 PMD_REGISTER_DRIVER(rte_i40e_driver);
340 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
341 struct rte_eth_dev *dev)
343 struct rte_pci_device *pci_dev;
344 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
345 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
346 struct i40e_vsi *vsi;
351 PMD_INIT_FUNC_TRACE();
353 dev->dev_ops = &i40e_eth_dev_ops;
354 dev->rx_pkt_burst = i40e_recv_pkts;
355 dev->tx_pkt_burst = i40e_xmit_pkts;
357 /* for secondary processes, we don't initialise any further as primary
358 * has already done this work. Only check we don't need a different
360 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
361 if (dev->data->scattered_rx)
362 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
365 pci_dev = dev->pci_dev;
366 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
367 pf->adapter->eth_dev = dev;
368 pf->dev_data = dev->data;
370 hw->back = I40E_PF_TO_ADAPTER(pf);
371 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
373 PMD_INIT_LOG(ERR, "Hardware is not available, "
374 "as address is NULL\n");
378 hw->vendor_id = pci_dev->id.vendor_id;
379 hw->device_id = pci_dev->id.device_id;
380 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
381 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
382 hw->bus.device = pci_dev->addr.devid;
383 hw->bus.func = pci_dev->addr.function;
385 /* Make sure all is clean before doing PF reset */
388 /* Reset here to make sure all is clean for each PF */
389 ret = i40e_pf_reset(hw);
391 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
395 /* Initialize the shared code (base driver) */
396 ret = i40e_init_shared_code(hw);
398 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
402 /* Initialize the parameters for adminq */
403 i40e_init_adminq_parameter(hw);
404 ret = i40e_init_adminq(hw);
405 if (ret != I40E_SUCCESS) {
406 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
409 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
410 "%02d.%02d.%02d eetrack %04x\n",
411 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
412 hw->aq.api_maj_ver, hw->aq.api_min_ver,
413 ((hw->nvm.version >> 12) & 0xf),
414 ((hw->nvm.version >> 4) & 0xff),
415 (hw->nvm.version & 0xf), hw->nvm.eetrack);
418 ret = i40e_aq_stop_lldp(hw, true, NULL);
419 if (ret != I40E_SUCCESS) /* Its failure can be ignored */
420 PMD_INIT_LOG(INFO, "Failed to stop lldp\n");
423 i40e_clear_pxe_mode(hw);
425 /* Get hw capabilities */
426 ret = i40e_get_cap(hw);
427 if (ret != I40E_SUCCESS) {
428 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
429 goto err_get_capabilities;
432 /* Initialize parameters for PF */
433 ret = i40e_pf_parameter_init(dev);
435 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
436 goto err_parameter_init;
439 /* Initialize the queue management */
440 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
442 PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
443 goto err_qp_pool_init;
445 ret = i40e_res_pool_init(&pf->msix_pool, 1,
446 hw->func_caps.num_msix_vectors - 1);
448 PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
449 goto err_msix_pool_init;
452 /* Initialize lan hmc */
453 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
454 hw->func_caps.num_rx_qp, 0, 0);
455 if (ret != I40E_SUCCESS) {
456 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
457 goto err_init_lan_hmc;
460 /* Configure lan hmc */
461 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
462 if (ret != I40E_SUCCESS) {
463 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
464 goto err_configure_lan_hmc;
467 /* Get and check the mac address */
468 i40e_get_mac_addr(hw, hw->mac.addr);
469 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
470 PMD_INIT_LOG(ERR, "mac address is not valid");
472 goto err_get_mac_addr;
474 /* Copy the permanent MAC address */
475 ether_addr_copy((struct ether_addr *) hw->mac.addr,
476 (struct ether_addr *) hw->mac.perm_addr);
478 /* Disable flow control */
479 hw->fc.requested_mode = I40E_FC_NONE;
480 i40e_set_fc(hw, &aq_fail, TRUE);
482 /* PF setup, which includes VSI setup */
483 ret = i40e_pf_setup(pf);
485 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
486 goto err_setup_pf_switch;
491 /* Disable double vlan by default */
492 i40e_vsi_config_double_vlan(vsi, FALSE);
494 if (!vsi->max_macaddrs)
495 len = ETHER_ADDR_LEN;
497 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
499 /* Should be done after the VSI is initialized */
500 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
501 if (!dev->data->mac_addrs) {
502 PMD_INIT_LOG(ERR, "Failed to allocated memory "
503 "for storing mac address");
504 goto err_get_mac_addr;
506 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
507 &dev->data->mac_addrs[0]);
509 /* Initialize the PF host driver to set up SR-IOV resources, if applicable */
510 i40e_pf_host_init(dev);
512 /* Register the interrupt callback with the EAL */
513 rte_intr_callback_register(&(pci_dev->intr_handle),
514 i40e_dev_interrupt_handler, (void *)dev);
516 /* configure and enable device interrupt */
517 i40e_pf_config_irq0(hw);
518 i40e_pf_enable_irq0(hw);
520 /* Enable uio interrupt after the callback is registered */
521 rte_intr_enable(&(pci_dev->intr_handle));
526 rte_free(pf->main_vsi);
528 err_configure_lan_hmc:
529 (void)i40e_shutdown_lan_hmc(hw);
531 i40e_res_pool_destroy(&pf->msix_pool);
533 i40e_res_pool_destroy(&pf->qp_pool);
536 err_get_capabilities:
537 (void)i40e_shutdown_adminq(hw);
543 i40e_dev_configure(struct rte_eth_dev *dev)
545 return i40e_dev_init_vlan(dev);
549 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
551 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
552 uint16_t msix_vect = vsi->msix_intr;
555 for (i = 0; i < vsi->nb_qps; i++) {
556 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
557 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
561 if (vsi->type != I40E_VSI_SRIOV) {
562 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
563 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
567 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
568 vsi->user_param + (msix_vect - 1);
570 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
572 I40E_WRITE_FLUSH(hw);
575 static inline uint16_t
576 i40e_calc_itr_interval(int16_t interval)
578 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
579 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
581 /* Convert to hardware count, as writing each 1 represents 2 us */
586 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
589 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
590 uint16_t msix_vect = vsi->msix_intr;
591 uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
594 for (i = 0; i < vsi->nb_qps; i++)
595 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
597 /* Bind all RX queues to allocated MSIX interrupt */
598 for (i = 0; i < vsi->nb_qps; i++) {
599 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
600 (interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
601 ((vsi->base_queue + i + 1) <<
602 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
603 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
604 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
606 if (i == vsi->nb_qps - 1)
607 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
608 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
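/*
 * The RX queues are thus chained into a linked list: each RQCTL entry
 * points to the following queue via NEXTQ_INDX, and the last entry is
 * terminated by writing the all-ones NEXTQ_INDX value.
 */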
611 /* Write first RX queue to Link list register as the head element */
612 if (vsi->type != I40E_VSI_SRIOV) {
613 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
614 (vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
615 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
617 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
618 msix_vect - 1), interval);
620 /* Disable auto-mask on enabling of all non-zero interrupts */
621 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
622 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
626 /* Subtract one from num_msix_vectors_vf to exclude IRQ0 */
627 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
628 vsi->user_param + (msix_vect - 1);
630 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
631 (vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
632 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
635 I40E_WRITE_FLUSH(hw);
639 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
641 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
642 uint16_t interval = i40e_calc_itr_interval(\
643 RTE_LIBRTE_I40E_ITR_INTERVAL);
645 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
646 I40E_PFINT_DYN_CTLN_INTENA_MASK |
647 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
648 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
649 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
653 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
655 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
657 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
660 static inline uint8_t
661 i40e_parse_link_speed(uint16_t eth_link_speed)
663 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
665 switch (eth_link_speed) {
666 case ETH_LINK_SPEED_40G:
667 link_speed = I40E_LINK_SPEED_40GB;
669 case ETH_LINK_SPEED_20G:
670 link_speed = I40E_LINK_SPEED_20GB;
672 case ETH_LINK_SPEED_10G:
673 link_speed = I40E_LINK_SPEED_10GB;
675 case ETH_LINK_SPEED_1000:
676 link_speed = I40E_LINK_SPEED_1GB;
678 case ETH_LINK_SPEED_100:
679 link_speed = I40E_LINK_SPEED_100MB;
687 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
689 enum i40e_status_code status;
690 struct i40e_aq_get_phy_abilities_resp phy_ab;
691 struct i40e_aq_set_phy_config phy_conf;
692 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
693 I40E_AQ_PHY_FLAG_PAUSE_RX |
694 I40E_AQ_PHY_FLAG_LOW_POWER;
695 const uint8_t advt = I40E_LINK_SPEED_40GB |
696 I40E_LINK_SPEED_10GB |
697 I40E_LINK_SPEED_1GB |
698 I40E_LINK_SPEED_100MB;
701 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
706 memset(&phy_conf, 0, sizeof(phy_conf));
708 /* bits 0-2 use the values from get_phy_abilities_resp */
710 abilities |= phy_ab.abilities & mask;
712 /* Update abilities and speed */
713 if (abilities & I40E_AQ_PHY_AN_ENABLED)
714 phy_conf.link_speed = advt;
716 phy_conf.link_speed = force_speed;
718 phy_conf.abilities = abilities;
720 /* use get_phy_abilities_resp value for the rest */
721 phy_conf.phy_type = phy_ab.phy_type;
722 phy_conf.eee_capability = phy_ab.eee_capability;
723 phy_conf.eeer = phy_ab.eeer_val;
724 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
726 PMD_DRV_LOG(DEBUG, "\n\tCurrent: abilities %x, link_speed %x\n"
727 "\tConfig: abilities %x, link_speed %x",
728 phy_ab.abilities, phy_ab.link_speed,
729 phy_conf.abilities, phy_conf.link_speed);
731 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
739 i40e_apply_link_speed(struct rte_eth_dev *dev)
742 uint8_t abilities = 0;
743 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
744 struct rte_eth_conf *conf = &dev->data->dev_conf;
746 speed = i40e_parse_link_speed(conf->link_speed);
747 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
748 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
749 abilities |= I40E_AQ_PHY_AN_ENABLED;
751 abilities |= I40E_AQ_PHY_LINK_ENABLED;
753 return i40e_phy_conf_link(hw, abilities, speed);
757 i40e_dev_start(struct rte_eth_dev *dev)
759 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
760 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
761 struct i40e_vsi *vsi = pf->main_vsi;
764 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
765 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
766 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
767 dev->data->dev_conf.link_duplex,
773 ret = i40e_vsi_init(vsi);
774 if (ret != I40E_SUCCESS) {
775 PMD_DRV_LOG(ERR, "Failed to init VSI\n");
779 /* Map queues with MSIX interrupt */
780 i40e_vsi_queues_bind_intr(vsi);
781 i40e_vsi_enable_queues_intr(vsi);
783 /* Enable all queues which have been configured */
784 ret = i40e_vsi_switch_queues(vsi, TRUE);
785 if (ret != I40E_SUCCESS) {
786 PMD_DRV_LOG(ERR, "Failed to enable VSI\n");
790 /* Enable receiving broadcast packets */
791 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
792 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
793 if (ret != I40E_SUCCESS)
794 PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
797 /* Apply link configure */
798 ret = i40e_apply_link_speed(dev);
799 if (I40E_SUCCESS != ret) {
800 PMD_DRV_LOG(ERR, "Fail to apply link setting\n");
807 i40e_vsi_switch_queues(vsi, FALSE);
813 i40e_dev_stop(struct rte_eth_dev *dev)
815 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
816 struct i40e_vsi *vsi = pf->main_vsi;
818 /* Disable all queues */
819 i40e_vsi_switch_queues(vsi, FALSE);
822 i40e_dev_set_link_down(dev);
824 /* un-map queues with interrupt registers */
825 i40e_vsi_disable_queues_intr(vsi);
826 i40e_vsi_queues_unbind_intr(vsi);
830 i40e_dev_close(struct rte_eth_dev *dev)
832 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
833 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
836 PMD_INIT_FUNC_TRACE();
840 /* Disable interrupt */
841 i40e_pf_disable_irq0(hw);
842 rte_intr_disable(&(dev->pci_dev->intr_handle));
844 /* shutdown and destroy the HMC */
845 i40e_shutdown_lan_hmc(hw);
847 /* release all the existing VSIs and VEBs */
848 i40e_vsi_release(pf->main_vsi);
850 /* shutdown the adminq */
851 i40e_aq_queue_shutdown(hw, true);
852 i40e_shutdown_adminq(hw);
854 i40e_res_pool_destroy(&pf->qp_pool);
855 i40e_res_pool_destroy(&pf->msix_pool);
857 /* force a PF reset to clean anything leftover */
858 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
859 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
860 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
861 I40E_WRITE_FLUSH(hw);
865 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
867 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
868 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
869 struct i40e_vsi *vsi = pf->main_vsi;
872 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
874 if (status != I40E_SUCCESS)
875 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");
877 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
879 if (status != I40E_SUCCESS)
880 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
885 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
887 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
888 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
889 struct i40e_vsi *vsi = pf->main_vsi;
892 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
894 if (status != I40E_SUCCESS)
895 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");
897 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
899 if (status != I40E_SUCCESS)
900 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
904 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
906 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
907 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
908 struct i40e_vsi *vsi = pf->main_vsi;
911 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
912 if (ret != I40E_SUCCESS)
913 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
917 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
919 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
920 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
921 struct i40e_vsi *vsi = pf->main_vsi;
924 if (dev->data->promiscuous == 1)
925 return; /* must remain in all_multicast mode */
927 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
928 vsi->seid, FALSE, NULL);
929 if (ret != I40E_SUCCESS)
930 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
934 * Set device link up.
937 i40e_dev_set_link_up(struct rte_eth_dev *dev)
939 /* re-apply link speed setting */
940 return i40e_apply_link_speed(dev);
944 * Set device link down.
947 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
949 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
950 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
951 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
953 return i40e_phy_conf_link(hw, abilities, speed);
957 i40e_dev_link_update(struct rte_eth_dev *dev,
958 __rte_unused int wait_to_complete)
960 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
961 struct i40e_link_status link_status;
962 struct rte_eth_link link, old;
965 memset(&link, 0, sizeof(link));
966 memset(&old, 0, sizeof(old));
967 memset(&link_status, 0, sizeof(link_status));
968 rte_i40e_dev_atomic_read_link_status(dev, &old);
970 /* Get link status information from hardware */
971 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
972 if (status != I40E_SUCCESS) {
973 link.link_speed = ETH_LINK_SPEED_100;
974 link.link_duplex = ETH_LINK_FULL_DUPLEX;
975 PMD_DRV_LOG(ERR, "Failed to get link info\n");
979 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
981 if (!link.link_status)
984 /* i40e uses full duplex only */
985 link.link_duplex = ETH_LINK_FULL_DUPLEX;
987 /* Parse the link status */
988 switch (link_status.link_speed) {
989 case I40E_LINK_SPEED_100MB:
990 link.link_speed = ETH_LINK_SPEED_100;
992 case I40E_LINK_SPEED_1GB:
993 link.link_speed = ETH_LINK_SPEED_1000;
995 case I40E_LINK_SPEED_10GB:
996 link.link_speed = ETH_LINK_SPEED_10G;
998 case I40E_LINK_SPEED_20GB:
999 link.link_speed = ETH_LINK_SPEED_20G;
1001 case I40E_LINK_SPEED_40GB:
1002 link.link_speed = ETH_LINK_SPEED_40G;
1005 link.link_speed = ETH_LINK_SPEED_100;
1010 rte_i40e_dev_atomic_write_link_status(dev, &link);
1011 if (link.link_status == old.link_status)
1017 /* Get all the statistics of a VSI */
1019 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1021 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1022 struct i40e_eth_stats *nes = &vsi->eth_stats;
1023 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1024 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1026 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1027 vsi->offset_loaded, &oes->rx_bytes,
1029 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1030 vsi->offset_loaded, &oes->rx_unicast,
1032 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1033 vsi->offset_loaded, &oes->rx_multicast,
1034 &nes->rx_multicast);
1035 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1036 vsi->offset_loaded, &oes->rx_broadcast,
1037 &nes->rx_broadcast);
1038 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1039 &oes->rx_discards, &nes->rx_discards);
1040 /* GLV_REPC not supported */
1041 /* GLV_RMPC not supported */
1042 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1043 &oes->rx_unknown_protocol,
1044 &nes->rx_unknown_protocol);
1045 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1046 vsi->offset_loaded, &oes->tx_bytes,
1048 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1049 vsi->offset_loaded, &oes->tx_unicast,
1051 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1052 vsi->offset_loaded, &oes->tx_multicast,
1053 &nes->tx_multicast);
1054 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1055 vsi->offset_loaded, &oes->tx_broadcast,
1056 &nes->tx_broadcast);
1057 /* GLV_TDPC not supported */
1058 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1059 &oes->tx_errors, &nes->tx_errors);
1060 vsi->offset_loaded = true;
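/*
 * Note on the offset mechanism: on the first call the raw hardware
 * counters are latched into the *_offset fields, so later calls report
 * deltas relative to that baseline rather than absolute register values.
 */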
1062 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
1063 printf("***************** VSI[%u] stats start *******************\n",
1065 printf("rx_bytes: %lu\n", nes->rx_bytes);
1066 printf("rx_unicast: %lu\n", nes->rx_unicast);
1067 printf("rx_multicast: %lu\n", nes->rx_multicast);
1068 printf("rx_broadcast: %lu\n", nes->rx_broadcast);
1069 printf("rx_discards: %lu\n", nes->rx_discards);
1070 printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
1071 printf("tx_bytes: %lu\n", nes->tx_bytes);
1072 printf("tx_unicast: %lu\n", nes->tx_unicast);
1073 printf("tx_multicast: %lu\n", nes->tx_multicast);
1074 printf("tx_broadcast: %lu\n", nes->tx_broadcast);
1075 printf("tx_discards: %lu\n", nes->tx_discards);
1076 printf("tx_errors: %lu\n", nes->tx_errors);
1077 printf("***************** VSI[%u] stats end *******************\n",
1079 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
1082 /* Get all statistics of a port */
1084 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1087 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1088 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1089 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1090 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1092 /* Get statistics of struct i40e_eth_stats */
1093 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1094 I40E_GLPRT_GORCL(hw->port),
1095 pf->offset_loaded, &os->eth.rx_bytes,
1097 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1098 I40E_GLPRT_UPRCL(hw->port),
1099 pf->offset_loaded, &os->eth.rx_unicast,
1100 &ns->eth.rx_unicast);
1101 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1102 I40E_GLPRT_MPRCL(hw->port),
1103 pf->offset_loaded, &os->eth.rx_multicast,
1104 &ns->eth.rx_multicast);
1105 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1106 I40E_GLPRT_BPRCL(hw->port),
1107 pf->offset_loaded, &os->eth.rx_broadcast,
1108 &ns->eth.rx_broadcast);
1109 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1110 pf->offset_loaded, &os->eth.rx_discards,
1111 &ns->eth.rx_discards);
1112 /* GLPRT_REPC not supported */
1113 /* GLPRT_RMPC not supported */
1114 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1116 &os->eth.rx_unknown_protocol,
1117 &ns->eth.rx_unknown_protocol);
1118 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1119 I40E_GLPRT_GOTCL(hw->port),
1120 pf->offset_loaded, &os->eth.tx_bytes,
1122 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1123 I40E_GLPRT_UPTCL(hw->port),
1124 pf->offset_loaded, &os->eth.tx_unicast,
1125 &ns->eth.tx_unicast);
1126 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1127 I40E_GLPRT_MPTCL(hw->port),
1128 pf->offset_loaded, &os->eth.tx_multicast,
1129 &ns->eth.tx_multicast);
1130 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1131 I40E_GLPRT_BPTCL(hw->port),
1132 pf->offset_loaded, &os->eth.tx_broadcast,
1133 &ns->eth.tx_broadcast);
1134 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1135 pf->offset_loaded, &os->eth.tx_discards,
1136 &ns->eth.tx_discards);
1137 /* GLPRT_TEPC not supported */
1139 /* additional port specific stats */
1140 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1141 pf->offset_loaded, &os->tx_dropped_link_down,
1142 &ns->tx_dropped_link_down);
1143 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1144 pf->offset_loaded, &os->crc_errors,
1146 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1147 pf->offset_loaded, &os->illegal_bytes,
1148 &ns->illegal_bytes);
1149 /* GLPRT_ERRBC not supported */
1150 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1151 pf->offset_loaded, &os->mac_local_faults,
1152 &ns->mac_local_faults);
1153 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1154 pf->offset_loaded, &os->mac_remote_faults,
1155 &ns->mac_remote_faults);
1156 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1157 pf->offset_loaded, &os->rx_length_errors,
1158 &ns->rx_length_errors);
1159 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1160 pf->offset_loaded, &os->link_xon_rx,
1162 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1163 pf->offset_loaded, &os->link_xoff_rx,
1165 for (i = 0; i < 8; i++) {
1166 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1168 &os->priority_xon_rx[i],
1169 &ns->priority_xon_rx[i]);
1170 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1172 &os->priority_xoff_rx[i],
1173 &ns->priority_xoff_rx[i]);
1175 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1176 pf->offset_loaded, &os->link_xon_tx,
1178 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1179 pf->offset_loaded, &os->link_xoff_tx,
1181 for (i = 0; i < 8; i++) {
1182 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1184 &os->priority_xon_tx[i],
1185 &ns->priority_xon_tx[i]);
1186 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1188 &os->priority_xoff_tx[i],
1189 &ns->priority_xoff_tx[i]);
1190 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1192 &os->priority_xon_2_xoff[i],
1193 &ns->priority_xon_2_xoff[i]);
1195 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1196 I40E_GLPRT_PRC64L(hw->port),
1197 pf->offset_loaded, &os->rx_size_64,
1199 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1200 I40E_GLPRT_PRC127L(hw->port),
1201 pf->offset_loaded, &os->rx_size_127,
1203 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1204 I40E_GLPRT_PRC255L(hw->port),
1205 pf->offset_loaded, &os->rx_size_255,
1207 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1208 I40E_GLPRT_PRC511L(hw->port),
1209 pf->offset_loaded, &os->rx_size_511,
1211 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1212 I40E_GLPRT_PRC1023L(hw->port),
1213 pf->offset_loaded, &os->rx_size_1023,
1215 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1216 I40E_GLPRT_PRC1522L(hw->port),
1217 pf->offset_loaded, &os->rx_size_1522,
1219 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1220 I40E_GLPRT_PRC9522L(hw->port),
1221 pf->offset_loaded, &os->rx_size_big,
1223 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1224 pf->offset_loaded, &os->rx_undersize,
1226 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1227 pf->offset_loaded, &os->rx_fragments,
1229 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1230 pf->offset_loaded, &os->rx_oversize,
1232 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1233 pf->offset_loaded, &os->rx_jabber,
1235 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1236 I40E_GLPRT_PTC64L(hw->port),
1237 pf->offset_loaded, &os->tx_size_64,
1239 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1240 I40E_GLPRT_PTC127L(hw->port),
1241 pf->offset_loaded, &os->tx_size_127,
1243 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1244 I40E_GLPRT_PTC255L(hw->port),
1245 pf->offset_loaded, &os->tx_size_255,
1247 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1248 I40E_GLPRT_PTC511L(hw->port),
1249 pf->offset_loaded, &os->tx_size_511,
1251 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1252 I40E_GLPRT_PTC1023L(hw->port),
1253 pf->offset_loaded, &os->tx_size_1023,
1255 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1256 I40E_GLPRT_PTC1522L(hw->port),
1257 pf->offset_loaded, &os->tx_size_1522,
1259 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1260 I40E_GLPRT_PTC9522L(hw->port),
1261 pf->offset_loaded, &os->tx_size_big,
1263 /* GLPRT_MSPDC not supported */
1264 /* GLPRT_XEC not supported */
1266 pf->offset_loaded = true;
1268 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1269 ns->eth.rx_broadcast;
1270 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1271 ns->eth.tx_broadcast;
1272 stats->ibytes = ns->eth.rx_bytes;
1273 stats->obytes = ns->eth.tx_bytes;
1274 stats->oerrors = ns->eth.tx_errors;
1275 stats->imcasts = ns->eth.rx_multicast;
1278 i40e_update_vsi_stats(pf->main_vsi);
1280 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
1281 printf("***************** PF stats start *******************\n");
1282 printf("rx_bytes: %lu\n", ns->eth.rx_bytes);
1283 printf("rx_unicast: %lu\n", ns->eth.rx_unicast);
1284 printf("rx_multicast: %lu\n", ns->eth.rx_multicast);
1285 printf("rx_broadcast: %lu\n", ns->eth.rx_broadcast);
1286 printf("rx_discards: %lu\n", ns->eth.rx_discards);
1287 printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
1288 printf("tx_bytes: %lu\n", ns->eth.tx_bytes);
1289 printf("tx_unicast: %lu\n", ns->eth.tx_unicast);
1290 printf("tx_multicast: %lu\n", ns->eth.tx_multicast);
1291 printf("tx_broadcast: %lu\n", ns->eth.tx_broadcast);
1292 printf("tx_discards: %lu\n", ns->eth.tx_discards);
1293 printf("tx_errors: %lu\n", ns->eth.tx_errors);
1295 printf("tx_dropped_link_down: %lu\n", ns->tx_dropped_link_down);
1296 printf("crc_errors: %lu\n", ns->crc_errors);
1297 printf("illegal_bytes: %lu\n", ns->illegal_bytes);
1298 printf("error_bytes: %lu\n", ns->error_bytes);
1299 printf("mac_local_faults: %lu\n", ns->mac_local_faults);
1300 printf("mac_remote_faults: %lu\n", ns->mac_remote_faults);
1301 printf("rx_length_errors: %lu\n", ns->rx_length_errors);
1302 printf("link_xon_rx: %lu\n", ns->link_xon_rx);
1303 printf("link_xoff_rx: %lu\n", ns->link_xoff_rx);
1304 for (i = 0; i < 8; i++) {
1305 printf("priority_xon_rx[%d]: %lu\n",
1306 i, ns->priority_xon_rx[i]);
1307 printf("priority_xoff_rx[%d]: %lu\n",
1308 i, ns->priority_xoff_rx[i]);
1310 printf("link_xon_tx: %lu\n", ns->link_xon_tx);
1311 printf("link_xoff_tx: %lu\n", ns->link_xoff_tx);
1312 for (i = 0; i < 8; i++) {
1313 printf("priority_xon_tx[%d]: %lu\n",
1314 i, ns->priority_xon_tx[i]);
1315 printf("priority_xoff_tx[%d]: %lu\n",
1316 i, ns->priority_xoff_tx[i]);
1317 printf("priority_xon_2_xoff[%d]: %lu\n",
1318 i, ns->priority_xon_2_xoff[i]);
1320 printf("rx_size_64: %lu\n", ns->rx_size_64);
1321 printf("rx_size_127: %lu\n", ns->rx_size_127);
1322 printf("rx_size_255: %lu\n", ns->rx_size_255);
1323 printf("rx_size_511: %lu\n", ns->rx_size_511);
1324 printf("rx_size_1023: %lu\n", ns->rx_size_1023);
1325 printf("rx_size_1522: %lu\n", ns->rx_size_1522);
1326 printf("rx_size_big: %lu\n", ns->rx_size_big);
1327 printf("rx_undersize: %lu\n", ns->rx_undersize);
1328 printf("rx_fragments: %lu\n", ns->rx_fragments);
1329 printf("rx_oversize: %lu\n", ns->rx_oversize);
1330 printf("rx_jabber: %lu\n", ns->rx_jabber);
1331 printf("tx_size_64: %lu\n", ns->tx_size_64);
1332 printf("tx_size_127: %lu\n", ns->tx_size_127);
1333 printf("tx_size_255: %lu\n", ns->tx_size_255);
1334 printf("tx_size_511: %lu\n", ns->tx_size_511);
1335 printf("tx_size_1023: %lu\n", ns->tx_size_1023);
1336 printf("tx_size_1522: %lu\n", ns->tx_size_1522);
1337 printf("tx_size_big: %lu\n", ns->tx_size_big);
1338 printf("mac_short_packet_dropped: %lu\n",
1339 ns->mac_short_packet_dropped);
1340 printf("checksum_error: %lu\n", ns->checksum_error);
1341 printf("***************** PF stats end ********************\n");
1342 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
1345 /* Reset the statistics */
1347 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1349 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1351 /* It results in reloading the start point of each counter */
1352 pf->offset_loaded = false;
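/*
 * The next i40e_dev_stats_get() call will then latch the current hardware
 * counters as the new baseline, which effectively zeroes the reported
 * statistics without touching the registers themselves.
 */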
1356 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1357 __rte_unused uint16_t queue_id,
1358 __rte_unused uint8_t stat_idx,
1359 __rte_unused uint8_t is_rx)
1361 PMD_INIT_FUNC_TRACE();
1367 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1369 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1370 struct i40e_vsi *vsi = pf->main_vsi;
1372 dev_info->max_rx_queues = vsi->nb_qps;
1373 dev_info->max_tx_queues = vsi->nb_qps;
1374 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1375 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1376 dev_info->max_mac_addrs = vsi->max_macaddrs;
1377 dev_info->max_vfs = dev->pci_dev->max_vfs;
1378 dev_info->rx_offload_capa =
1379 DEV_RX_OFFLOAD_VLAN_STRIP |
1380 DEV_RX_OFFLOAD_IPV4_CKSUM |
1381 DEV_RX_OFFLOAD_UDP_CKSUM |
1382 DEV_RX_OFFLOAD_TCP_CKSUM;
1383 dev_info->tx_offload_capa =
1384 DEV_TX_OFFLOAD_VLAN_INSERT |
1385 DEV_TX_OFFLOAD_IPV4_CKSUM |
1386 DEV_TX_OFFLOAD_UDP_CKSUM |
1387 DEV_TX_OFFLOAD_TCP_CKSUM |
1388 DEV_TX_OFFLOAD_SCTP_CKSUM;
1392 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1394 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1395 struct i40e_vsi *vsi = pf->main_vsi;
1396 PMD_INIT_FUNC_TRACE();
1399 return i40e_vsi_add_vlan(vsi, vlan_id);
1401 return i40e_vsi_delete_vlan(vsi, vlan_id);
1405 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1406 __rte_unused uint16_t tpid)
1408 PMD_INIT_FUNC_TRACE();
1412 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1414 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1415 struct i40e_vsi *vsi = pf->main_vsi;
1417 if (mask & ETH_VLAN_STRIP_MASK) {
1418 /* Enable or disable VLAN stripping */
1419 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1420 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1422 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1425 if (mask & ETH_VLAN_EXTEND_MASK) {
1426 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1427 i40e_vsi_config_double_vlan(vsi, TRUE);
1429 i40e_vsi_config_double_vlan(vsi, FALSE);
1434 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1435 __rte_unused uint16_t queue,
1436 __rte_unused int on)
1438 PMD_INIT_FUNC_TRACE();
1442 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1444 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1445 struct i40e_vsi *vsi = pf->main_vsi;
1446 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1447 struct i40e_vsi_vlan_pvid_info info;
1449 memset(&info, 0, sizeof(info));
1452 info.config.pvid = pvid;
1454 info.config.reject.tagged =
1455 data->dev_conf.txmode.hw_vlan_reject_tagged;
1456 info.config.reject.untagged =
1457 data->dev_conf.txmode.hw_vlan_reject_untagged;
1460 return i40e_vsi_vlan_pvid_set(vsi, &info);
1464 i40e_dev_led_on(struct rte_eth_dev *dev)
1466 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1467 uint32_t mode = i40e_led_get(hw);
1470 i40e_led_set(hw, 0xf, true); /* 0xf means the LED is always on */
1476 i40e_dev_led_off(struct rte_eth_dev *dev)
1478 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1479 uint32_t mode = i40e_led_get(hw);
1482 i40e_led_set(hw, 0, false);
1488 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1489 __rte_unused struct rte_eth_fc_conf *fc_conf)
1491 PMD_INIT_FUNC_TRACE();
1497 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1498 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1500 PMD_INIT_FUNC_TRACE();
1505 /* Add a MAC address, and update filters */
1507 i40e_macaddr_add(struct rte_eth_dev *dev,
1508 struct ether_addr *mac_addr,
1509 __attribute__((unused)) uint32_t index,
1510 __attribute__((unused)) uint32_t pool)
1512 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1513 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1514 struct i40e_vsi *vsi = pf->main_vsi;
1515 struct ether_addr old_mac;
1518 if (!is_valid_assigned_ether_addr(mac_addr)) {
1519 PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
1523 if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1524 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");
1528 /* Write mac address */
1529 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1530 mac_addr->addr_bytes, NULL);
1531 if (ret != I40E_SUCCESS) {
1532 PMD_DRV_LOG(ERR, "Failed to write mac address\n");
1536 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1537 (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1540 ret = i40e_vsi_add_mac(vsi, mac_addr);
1541 if (ret != I40E_SUCCESS) {
1542 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
1546 ether_addr_copy(mac_addr, &pf->dev_addr);
1547 i40e_vsi_delete_mac(vsi, &old_mac);
1550 /* Remove a MAC address, and update filters */
1552 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1554 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1555 struct i40e_vsi *vsi = pf->main_vsi;
1556 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1557 struct ether_addr *macaddr;
1559 struct i40e_hw *hw =
1560 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1562 if (index >= vsi->max_macaddrs)
1565 macaddr = &(data->mac_addrs[index]);
1566 if (!is_valid_assigned_ether_addr(macaddr))
1569 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1570 hw->mac.perm_addr, NULL);
1571 if (ret != I40E_SUCCESS) {
1572 PMD_DRV_LOG(ERR, "Failed to write mac address\n");
1576 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
1578 ret = i40e_vsi_delete_mac(vsi, macaddr);
1579 if (ret != I40E_SUCCESS)
1582 /* Clear device address as it has been removed */
1583 if (is_same_ether_addr(&(pf->dev_addr), macaddr))
1584 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1588 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1589 struct rte_eth_rss_reta *reta_conf)
1591 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1593 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1595 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1597 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1599 mask = (uint8_t)((reta_conf->mask_hi >>
1608 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1610 for (j = 0, lut = 0; j < 4; j++) {
1611 if (mask & (0x1 << j))
1612 lut |= reta_conf->reta[i + j] << (8 * j);
1614 lut |= l & (0xFF << (8 * j));
1616 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
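/*
 * Each I40E_PFQF_HLUT register packs four 8-bit redirection table entries,
 * so entry i lives in register (i >> 2) at byte (i & 3); the mask bits
 * select which of the four bytes are rewritten while the remaining bytes
 * are preserved from the value read back above.
 */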
1623 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1624 struct rte_eth_rss_reta *reta_conf)
1626 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1628 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1630 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1632 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1634 mask = (uint8_t)((reta_conf->mask_hi >>
1640 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1641 for (j = 0; j < 4; j++) {
1642 if (mask & (0x1 << j))
1643 reta_conf->reta[i + j] =
1644 (uint8_t)((lut >> (8 * j)) & 0xFF);
1652 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1653 * @hw: pointer to the HW structure
1654 * @mem: pointer to mem struct to fill out
1655 * @size: size of memory requested
1656 * @alignment: what to align the allocation to
1658 enum i40e_status_code
1659 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1660 struct i40e_dma_mem *mem,
1664 static uint64_t id = 0;
1665 const struct rte_memzone *mz = NULL;
1666 char z_name[RTE_MEMZONE_NAMESIZE];
1669 return I40E_ERR_PARAM;
1672 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1673 #ifdef RTE_LIBRTE_XEN_DOM0
1674 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1677 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1680 return I40E_ERR_NO_MEMORY;
1685 #ifdef RTE_LIBRTE_XEN_DOM0
1686 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1688 mem->pa = mz->phys_addr;
1691 return I40E_SUCCESS;
1695 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1696 * @hw: pointer to the HW structure
1697 * @mem: ptr to mem struct to free
1699 enum i40e_status_code
1700 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1701 struct i40e_dma_mem *mem)
1703 if (!mem || !mem->va)
1704 return I40E_ERR_PARAM;
1709 return I40E_SUCCESS;
1713 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1714 * @hw: pointer to the HW structure
1715 * @mem: pointer to mem struct to fill out
1716 * @size: size of memory requested
1718 enum i40e_status_code
1719 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1720 struct i40e_virt_mem *mem,
1724 return I40E_ERR_PARAM;
1727 mem->va = rte_zmalloc("i40e", size, 0);
1730 return I40E_SUCCESS;
1732 return I40E_ERR_NO_MEMORY;
1736 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1737 * @hw: pointer to the HW structure
1738 * @mem: pointer to mem struct to free
1740 enum i40e_status_code
1741 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1742 struct i40e_virt_mem *mem)
1745 return I40E_ERR_PARAM;
1750 return I40E_SUCCESS;
1754 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1756 rte_spinlock_init(&sp->spinlock);
1760 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1762 rte_spinlock_lock(&sp->spinlock);
1766 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1768 rte_spinlock_unlock(&sp->spinlock);
1772 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1778 * Get the hardware capabilities, which will be parsed
1779 * and saved into struct i40e_hw.
1782 i40e_get_cap(struct i40e_hw *hw)
1784 struct i40e_aqc_list_capabilities_element_resp *buf;
1785 uint16_t len, size = 0;
1788 /* Calculate a buffer large enough to hold the response data temporarily */
1789 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1790 I40E_MAX_CAP_ELE_NUM;
1791 buf = rte_zmalloc("i40e", len, 0);
1793 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
1794 return I40E_ERR_NO_MEMORY;
1797 /* Get and parse the capabilities, then save them to hw */
1798 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1799 i40e_aqc_opc_list_func_capabilities, NULL);
1800 if (ret != I40E_SUCCESS)
1801 PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");
1803 /* Free the temporary buffer after use */
1810 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1812 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1813 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1814 uint16_t sum_queues = 0, sum_vsis;
1816 /* First check whether the FW supports SR-IOV */
1817 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1818 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");
1822 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
1823 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
1824 PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
1825 /* Allocate queues for pf */
1826 if (hw->func_caps.rss) {
1827 pf->flags |= I40E_FLAG_RSS;
1828 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
1829 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
1830 pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
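/*
 * e.g. if the minimum above works out to 48 queue pairs, it is rounded
 * down to 32 so the LAN queue count stays a power of two, which keeps the
 * RSS indirection table mapping straightforward.
 */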
1833 sum_queues = pf->lan_nb_qps;
1834 /* The default VSI is not counted in this total */
1836 PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);
1838 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
1839 pf->flags |= I40E_FLAG_SRIOV;
1840 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
1841 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
1842 PMD_INIT_LOG(ERR, "Config VF number %u, "
1843 "max supported %u.\n", dev->pci_dev->max_vfs,
1844 hw->func_caps.num_vfs);
1847 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
1848 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
1849 "max support %u queues.\n", pf->vf_nb_qps,
1850 I40E_MAX_QP_NUM_PER_VF);
1853 pf->vf_num = dev->pci_dev->max_vfs;
1854 sum_queues += pf->vf_nb_qps * pf->vf_num;
1855 sum_vsis += pf->vf_num;
1856 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
1857 pf->vf_num, pf->vf_nb_qps);
1861 if (hw->func_caps.vmdq) {
1862 pf->flags |= I40E_FLAG_VMDQ;
1863 pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
1864 sum_queues += pf->vmdq_nb_qps;
1866 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);
1869 if (hw->func_caps.fd) {
1870 pf->flags |= I40E_FLAG_FDIR;
1871 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
1873 * Each flow director consumes one VSI and one queue,
1874 * but the exact usage cannot be predicted here.
1878 if (sum_vsis > pf->max_num_vsi ||
1879 sum_queues > hw->func_caps.num_rx_qp) {
1880 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
1881 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
1882 pf->max_num_vsi, sum_vsis);
1883 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
1884 hw->func_caps.num_rx_qp, sum_queues);
1888 /* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for the misc interrupt cause */
1889 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
1890 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
1891 sum_vsis, hw->func_caps.num_msix_vectors);
1894 return I40E_SUCCESS;
1898 i40e_pf_get_switch_config(struct i40e_pf *pf)
1900 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1901 struct i40e_aqc_get_switch_config_resp *switch_config;
1902 struct i40e_aqc_switch_config_element_resp *element;
1903 uint16_t start_seid = 0, num_reported;
1906 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
1907 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
1908 if (!switch_config) {
1909 PMD_DRV_LOG(ERR, "Failed to allocated memory\n");
1913 /* Get the switch configurations */
1914 ret = i40e_aq_get_switch_config(hw, switch_config,
1915 I40E_AQ_LARGE_BUF, &start_seid, NULL);
1916 if (ret != I40E_SUCCESS) {
1917 PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
1920 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
1921 if (num_reported != 1) { /* The number should be 1 */
1922 PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");
1926 /* Parse the switch configuration elements */
1927 element = &(switch_config->element[0]);
1928 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
1929 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
1930 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
1932 PMD_DRV_LOG(INFO, "Unknown element type\n");
1935 rte_free(switch_config);
1941 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
1944 struct pool_entry *entry;
1946 if (pool == NULL || num == 0)
1949 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
1950 if (entry == NULL) {
1951 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1956 /* Initialize the queue heap */
1957 pool->num_free = num;
1958 pool->num_alloc = 0;
1960 LIST_INIT(&pool->alloc_list);
1961 LIST_INIT(&pool->free_list);
1963 /* Initialize element */
1967 LIST_INSERT_HEAD(&pool->free_list, entry, next);
1972 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
1974 struct pool_entry *entry;
1979 LIST_FOREACH(entry, &pool->alloc_list, next) {
1980 LIST_REMOVE(entry, next);
1984 LIST_FOREACH(entry, &pool->free_list, next) {
1985 LIST_REMOVE(entry, next);
1990 pool->num_alloc = 0;
1992 LIST_INIT(&pool->alloc_list);
1993 LIST_INIT(&pool->free_list);
1997 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2000 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2001 uint32_t pool_offset;
2005 PMD_DRV_LOG(ERR, "Invalid parameter\n");
2009 pool_offset = base - pool->base;
2010 /* Lookup in alloc list */
2011 LIST_FOREACH(entry, &pool->alloc_list, next) {
2012 if (entry->base == pool_offset) {
2013 valid_entry = entry;
2014 LIST_REMOVE(entry, next);
2019 /* Not found, return */
2020 if (valid_entry == NULL) {
2021 PMD_DRV_LOG(ERR, "Failed to find entry\n");
2026 * Found it, move it to the free list and try to merge.
2027 * To make merging easier, the free list is kept sorted by queue base.
2028 * Find the adjacent prev and next entries.
2031 LIST_FOREACH(entry, &pool->free_list, next) {
2032 if (entry->base > valid_entry->base) {
2040 /* Try to merge with next one*/
2042 /* Merge with next one */
2043 if (valid_entry->base + valid_entry->len == next->base) {
2044 next->base = valid_entry->base;
2045 next->len += valid_entry->len;
2046 rte_free(valid_entry);
2053 /* Merge with previous one */
2054 if (prev->base + prev->len == valid_entry->base) {
2055 prev->len += valid_entry->len;
2056 /* If it already merged with the next one, remove that node */
2058 LIST_REMOVE(valid_entry, next);
2059 rte_free(valid_entry);
2061 rte_free(valid_entry);
2067 /* No adjacent entry found to merge with, insert */
2070 LIST_INSERT_AFTER(prev, valid_entry, next);
2071 else if (next != NULL)
2072 LIST_INSERT_BEFORE(next, valid_entry, next);
2073 else /* It's empty list, insert to head */
2074 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2077 pool->num_free += valid_entry->len;
2078 pool->num_alloc -= valid_entry->len;
2084 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2087 struct pool_entry *entry, *valid_entry;
2089 if (pool == NULL || num == 0) {
2090 PMD_DRV_LOG(ERR, "Invalid parameter\n");
2094 if (pool->num_free < num) {
2095 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
2096 num, pool->num_free);
2101 /* Look up the free list and find the best-fit entry */
2102 LIST_FOREACH(entry, &pool->free_list, next) {
2103 if (entry->len >= num) {
2105 if (entry->len == num) {
2106 valid_entry = entry;
2109 if (valid_entry == NULL || valid_entry->len > entry->len)
2110 valid_entry = entry;
2114 /* No entry found to satisfy the request, return */
2115 if (valid_entry == NULL) {
2116 PMD_DRV_LOG(ERR, "No valid entry found\n");
2120 * The entry has exactly the requested number of queues,
2121 * remove it from the free list.
2123 if (valid_entry->len == num) {
2124 LIST_REMOVE(valid_entry, next);
2127 * The entry has more queues than requested,
2128 * create a new entry for the alloc list and shrink
2129 * the base and length of the entry left in the free list.
2131 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2132 if (entry == NULL) {
2133 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2137 entry->base = valid_entry->base;
2139 valid_entry->base += num;
2140 valid_entry->len -= num;
2141 valid_entry = entry;
2144 /* Insert it into alloc list, not sorted */
2145 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2147 pool->num_free -= valid_entry->len;
2148 pool->num_alloc += valid_entry->len;
2150 return (valid_entry->base + pool->base);
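/*
 * Usage sketch (illustration only, assuming the pool was initialized with its
 * single free entry covering [0, num)): with i40e_res_pool_init(&pool, 16, 64),
 * i40e_res_pool_alloc(&pool, 4) splits the free entry into an allocated node of
 * length 4 and a remaining free node of length 60, then returns 0 + 16 = 16
 * (the absolute base); num_free becomes 60 and num_alloc becomes 4.
 */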
2154 * bitmap_is_subset - Check whether src2 is a subset of src1
2157 bitmap_is_subset(uint8_t src1, uint8_t src2)
2159 return !((src1 ^ src2) & src2);
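/*
 * Illustration: bitmap_is_subset(0x3, 0x1) = !((0x3 ^ 0x1) & 0x1) = !0 = 1,
 * i.e. src2 is contained in src1, while bitmap_is_subset(0x3, 0x4) =
 * !((0x3 ^ 0x4) & 0x4) = !0x4 = 0, since bit 2 of src2 is not set in src1.
 */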
2163 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2165 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2167 /* If DCB is not supported, only default TC is supported */
2168 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2169 PMD_DRV_LOG(ERR, "DCB is not enabled, "
2170 "only TC0 is supported\n");
2174 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2175 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2176 "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
2180 return I40E_SUCCESS;
2184 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2185 struct i40e_vsi_vlan_pvid_info *info)
2188 struct i40e_vsi_context ctxt;
2189 uint8_t vlan_flags = 0;
2192 if (vsi == NULL || info == NULL) {
2193 PMD_DRV_LOG(ERR, "invalid parameters\n");
2194 return I40E_ERR_PARAM;
2198 vsi->info.pvid = info->config.pvid;
2200 * If PVID insertion is enabled, only tagged packets are
2201 * allowed to be sent out.
2203 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2204 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2207 if (info->config.reject.tagged == 0)
2208 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2210 if (info->config.reject.untagged == 0)
2211 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2213 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2214 I40E_AQ_VSI_PVLAN_MODE_MASK);
2215 vsi->info.port_vlan_flags |= vlan_flags;
2216 vsi->info.valid_sections =
2217 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2218 memset(&ctxt, 0, sizeof(ctxt));
2219 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2220 ctxt.seid = vsi->seid;
2222 hw = I40E_VSI_TO_HW(vsi);
2223 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2224 if (ret != I40E_SUCCESS)
2225 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2231 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2233 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2235 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2237 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2238 if (ret != I40E_SUCCESS)
2242 PMD_DRV_LOG(ERR, "seid not valid\n");
2246 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2247 tc_bw_data.tc_valid_bits = enabled_tcmap;
2248 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2249 tc_bw_data.tc_bw_credits[i] =
2250 (enabled_tcmap & (1 << i)) ? 1 : 0;
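/*
 * For example, with enabled_tcmap covering only TC0 (e.g. 0x1), this yields
 * tc_valid_bits = 0x1 and tc_bw_credits = {1, 0, 0, ...}, i.e. all bandwidth
 * credits are assigned to TC0.
 */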
2252 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2253 if (ret != I40E_SUCCESS) {
2254 PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
2258 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2259 sizeof(vsi->info.qs_handle));
2260 return I40E_SUCCESS;
2264 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2265 struct i40e_aqc_vsi_properties_data *info,
2266 uint8_t enabled_tcmap)
2268 int ret, total_tc = 0, i;
2269 uint16_t qpnum_per_tc, bsf, qp_idx;
2271 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2272 if (ret != I40E_SUCCESS)
2275 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2276 if (enabled_tcmap & (1 << i))
2278 vsi->enabled_tc = enabled_tcmap;
2280 /* Number of queues per enabled TC */
2281 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2282 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2283 bsf = rte_bsf32(qpnum_per_tc);
2285 /* Adjust the queue count to the actual number of queues that can be used */
2286 vsi->nb_qps = qpnum_per_tc * total_tc;
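/*
 * Worked example (assuming i40e_prev_power_of_2() rounds down to the nearest
 * power of two): with vsi->nb_qps = 10 and only TC0 enabled, total_tc = 1,
 * qpnum_per_tc = 8, bsf = rte_bsf32(8) = 3, and nb_qps is adjusted to 8.
 */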
2289 * Configure TC and queue mapping parameters. For each enabled TC,
2290 * allocate qpnum_per_tc queues to it. For a disabled TC,
2291 * the default queue will serve it.
2294 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2295 if (vsi->enabled_tc & (1 << i)) {
2296 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2297 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2298 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2299 qp_idx += qpnum_per_tc;
2301 info->tc_mapping[i] = 0;
2304 /* Associate queue number with VSI */
2305 if (vsi->type == I40E_VSI_SRIOV) {
2306 info->mapping_flags |=
2307 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2308 for (i = 0; i < vsi->nb_qps; i++)
2309 info->queue_mapping[i] =
2310 rte_cpu_to_le_16(vsi->base_queue + i);
2312 info->mapping_flags |=
2313 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2314 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2316 info->valid_sections =
2317 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2319 return I40E_SUCCESS;
2323 i40e_veb_release(struct i40e_veb *veb)
2325 struct i40e_vsi *vsi;
2328 if (veb == NULL || veb->associate_vsi == NULL)
2331 if (!TAILQ_EMPTY(&veb->head)) {
2332 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
2336 vsi = veb->associate_vsi;
2337 hw = I40E_VSI_TO_HW(vsi);
2339 vsi->uplink_seid = veb->uplink_seid;
2340 i40e_aq_delete_element(hw, veb->seid, NULL);
2343 return I40E_SUCCESS;
2347 static struct i40e_veb *
2348 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2350 struct i40e_veb *veb;
2354 if (NULL == pf || vsi == NULL) {
2355 PMD_DRV_LOG(ERR, "veb setup failed, "
2356 "associated VSI shouldn't null\n");
2359 hw = I40E_PF_TO_HW(pf);
2361 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2363 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
2367 veb->associate_vsi = vsi;
2368 TAILQ_INIT(&veb->head);
2369 veb->uplink_seid = vsi->uplink_seid;
2371 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2372 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2374 if (ret != I40E_SUCCESS) {
2375 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
2376 hw->aq.asq_last_status);
2380 /* get statistics index */
2381 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2382 &veb->stats_idx, NULL, NULL, NULL);
2383 if (ret != I40E_SUCCESS) {
2384 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
2385 hw->aq.asq_last_status);
2389 /* Get VEB bandwidth, to be implemented */
2390 /* The associated VSI now binds to the VEB, set its uplink to this VEB */
2391 vsi->uplink_seid = veb->seid;
2400 i40e_vsi_release(struct i40e_vsi *vsi)
2404 struct i40e_vsi_list *vsi_list;
2406 struct i40e_mac_filter *f;
2409 return I40E_SUCCESS;
2411 pf = I40E_VSI_TO_PF(vsi);
2412 hw = I40E_VSI_TO_HW(vsi);
2414 /* VSI has children attached, release the children first */
2416 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2417 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2419 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2421 i40e_veb_release(vsi->veb);
2424 /* Remove all macvlan filters of the VSI */
2425 i40e_vsi_remove_all_macvlan_filter(vsi);
2426 TAILQ_FOREACH(f, &vsi->mac_list, next)
2429 if (vsi->type != I40E_VSI_MAIN) {
2430 /* Remove vsi from parent's sibling list */
2431 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2432 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
2433 return I40E_ERR_PARAM;
2435 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2436 &vsi->sib_vsi_list, list);
2438 /* Remove the switch element of the VSI */
2439 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2440 if (ret != I40E_SUCCESS)
2441 PMD_DRV_LOG(ERR, "Failed to delete element\n");
2443 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2445 if (vsi->type != I40E_VSI_SRIOV)
2446 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2449 return I40E_SUCCESS;
2453 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2455 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2456 struct i40e_aqc_remove_macvlan_element_data def_filter;
2459 if (vsi->type != I40E_VSI_MAIN)
2460 return I40E_ERR_CONFIG;
2461 memset(&def_filter, 0, sizeof(def_filter));
2462 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2464 def_filter.vlan_tag = 0;
2465 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2466 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2467 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2468 if (ret != I40E_SUCCESS) {
2469 struct i40e_mac_filter *f;
2471 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2472 "macvlan filter\n");
2473 /* The permanent MAC needs to be added into the MAC list instead */
2474 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2476 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
2477 return I40E_ERR_NO_MEMORY;
2479 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2481 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2487 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2491 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2493 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2494 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2495 struct i40e_hw *hw = &vsi->adapter->hw;
2499 memset(&bw_config, 0, sizeof(bw_config));
2500 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2501 if (ret != I40E_SUCCESS) {
2502 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
2503 "configuration %u\n", hw->aq.asq_last_status);
2507 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2508 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2509 &ets_sla_config, NULL);
2510 if (ret != I40E_SUCCESS) {
2511 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2512 "configuration %u\n", hw->aq.asq_last_status);
2516 /* Don't store the info yet, just print it out */
2517 PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
2518 PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
2519 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2520 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
2521 ets_sla_config.share_credits[i]);
2522 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
2523 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2524 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2525 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2534 i40e_vsi_setup(struct i40e_pf *pf,
2535 enum i40e_vsi_type type,
2536 struct i40e_vsi *uplink_vsi,
2537 uint16_t user_param)
2539 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2540 struct i40e_vsi *vsi;
2542 struct i40e_vsi_context ctxt;
2543 struct ether_addr broadcast =
2544 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2546 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2547 PMD_DRV_LOG(ERR, "VSI setup failed, "
2548 "VSI link shouldn't be NULL\n");
2552 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2553 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2554 "uplink VSI should be NULL\n");
2558 /* If uplink vsi didn't setup VEB, create one first */
2559 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2560 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2562 if (NULL == uplink_vsi->veb) {
2563 PMD_DRV_LOG(ERR, "VEB setup failed\n");
2568 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2570 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
2573 TAILQ_INIT(&vsi->mac_list);
2575 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2576 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2577 vsi->parent_vsi = uplink_vsi;
2578 vsi->user_param = user_param;
2579 /* Allocate queues */
2580 switch (vsi->type) {
2581 case I40E_VSI_MAIN :
2582 vsi->nb_qps = pf->lan_nb_qps;
2584 case I40E_VSI_SRIOV :
2585 vsi->nb_qps = pf->vf_nb_qps;
2590 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2592 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2596 vsi->base_queue = ret;
2598 /* VF has MSIX interrupt in VF range, don't allocate here */
2599 if (type != I40E_VSI_SRIOV) {
2600 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2602 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2603 goto fail_queue_alloc;
2605 vsi->msix_intr = ret;
2609 if (type == I40E_VSI_MAIN) {
2610 /* For the main VSI, no need to add it since it's the default one */
2611 vsi->uplink_seid = pf->mac_seid;
2612 vsi->seid = pf->main_vsi_seid;
2613 /* Bind queues with specific MSIX interrupt */
2615 * Needs at least 2 interrupts: one for the misc cause, which will be
2616 * enabled from the OS side, and another for the queues, bound to the
2617 * interrupt from the device side only.
2620 /* Get default VSI parameters from hardware */
2621 memset(&ctxt, 0, sizeof(ctxt));
2622 ctxt.seid = vsi->seid;
2623 ctxt.pf_num = hw->pf_id;
2624 ctxt.uplink_seid = vsi->uplink_seid;
2626 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2627 if (ret != I40E_SUCCESS) {
2628 PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
2629 goto fail_msix_alloc;
2631 (void)rte_memcpy(&vsi->info, &ctxt.info,
2632 sizeof(struct i40e_aqc_vsi_properties_data));
2633 vsi->vsi_id = ctxt.vsi_number;
2634 vsi->info.valid_sections = 0;
2636 /* Configure TC, only TC0 is enabled */
2637 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2639 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
2640 goto fail_msix_alloc;
2643 /* TC, queue mapping */
2644 memset(&ctxt, 0, sizeof(ctxt));
2645 vsi->info.valid_sections |=
2646 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2647 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2648 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2649 (void)rte_memcpy(&ctxt.info, &vsi->info,
2650 sizeof(struct i40e_aqc_vsi_properties_data));
2651 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2652 I40E_DEFAULT_TCMAP);
2653 if (ret != I40E_SUCCESS) {
2654 PMD_DRV_LOG(ERR, "Failed to configure "
2655 "TC queue mapping\n");
2656 goto fail_msix_alloc;
2658 ctxt.seid = vsi->seid;
2659 ctxt.pf_num = hw->pf_id;
2660 ctxt.uplink_seid = vsi->uplink_seid;
2663 /* Update VSI parameters */
2664 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2665 if (ret != I40E_SUCCESS) {
2666 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2667 goto fail_msix_alloc;
2670 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2671 sizeof(vsi->info.tc_mapping));
2672 (void)rte_memcpy(&vsi->info.queue_mapping,
2673 &ctxt.info.queue_mapping,
2674 sizeof(vsi->info.queue_mapping));
2675 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2676 vsi->info.valid_sections = 0;
2678 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2682 * Updating the default filter settings is necessary to prevent
2683 * reception of tagged packets.
2684 * Some old firmware configurations load a default macvlan
2685 * filter which accepts both tagged and untagged packets.
2686 * The updating is to use a normal filter instead if needed.
2687 * For NVM 4.2.2 or after, the updating is not needed anymore.
2688 * Firmware with correct configurations loads the default
2689 * macvlan filter which is expected and cannot be removed.
2691 i40e_update_default_filter_setting(vsi);
2692 } else if (type == I40E_VSI_SRIOV) {
2693 memset(&ctxt, 0, sizeof(ctxt));
2695 * For other VSIs, the uplink_seid equals the uplink VSI's
2696 * uplink_seid since they share the same VEB
2698 vsi->uplink_seid = uplink_vsi->uplink_seid;
2699 ctxt.pf_num = hw->pf_id;
2700 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2701 ctxt.uplink_seid = vsi->uplink_seid;
2702 ctxt.connection_type = 0x1;
2703 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2705 /* Configure switch ID */
2706 ctxt.info.valid_sections |=
2707 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2708 ctxt.info.switch_id =
2709 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2710 /* Configure port/vlan */
2711 ctxt.info.valid_sections |=
2712 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2713 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2714 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2715 I40E_DEFAULT_TCMAP);
2716 if (ret != I40E_SUCCESS) {
2717 PMD_DRV_LOG(ERR, "Failed to configure "
2718 "TC queue mapping\n");
2719 goto fail_msix_alloc;
2721 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2722 ctxt.info.valid_sections |=
2723 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2725 * Since the VSI is not created yet, only configure the parameters;
2726 * the VSI will be added below.
2730 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet\n");
2731 goto fail_msix_alloc;
2734 if (vsi->type != I40E_VSI_MAIN) {
2735 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2737 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
2738 hw->aq.asq_last_status);
2739 goto fail_msix_alloc;
2741 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2742 vsi->info.valid_sections = 0;
2743 vsi->seid = ctxt.seid;
2744 vsi->vsi_id = ctxt.vsi_number;
2745 vsi->sib_vsi_list.vsi = vsi;
2746 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2747 &vsi->sib_vsi_list, list);
2750 /* MAC/VLAN configuration */
2751 ret = i40e_vsi_add_mac(vsi, &broadcast);
2752 if (ret != I40E_SUCCESS) {
2753 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
2754 goto fail_msix_alloc;
2757 /* Get VSI BW information */
2758 i40e_vsi_dump_bw_config(vsi);
2761 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
2763 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
2769 /* Configure vlan stripping on or off */
2771 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2773 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2774 struct i40e_vsi_context ctxt;
2776 int ret = I40E_SUCCESS;
2778 /* Check whether it is already on or off */
2779 if (vsi->info.valid_sections &
2780 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2782 if ((vsi->info.port_vlan_flags &
2783 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2784 return 0; /* already on */
2786 if ((vsi->info.port_vlan_flags &
2787 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2788 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2789 return 0; /* already off */
2794 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2796 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2797 vsi->info.valid_sections =
2798 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2799 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2800 vsi->info.port_vlan_flags |= vlan_flags;
2801 ctxt.seid = vsi->seid;
2802 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2803 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2805 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
2806 on ? "enable" : "disable");
2812 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2814 struct rte_eth_dev_data *data = dev->data;
2817 /* Apply vlan offload setting */
2818 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2820 /* Apply double-vlan setting, not implemented yet */
2822 /* Apply pvid setting */
2823 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2824 data->dev_conf.txmode.hw_vlan_insert_pvid);
2826 PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
2832 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2834 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2836 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2840 i40e_update_flow_control(struct i40e_hw *hw)
2842 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2843 struct i40e_link_status link_status;
2844 uint32_t rxfc = 0, txfc = 0, reg;
2848 memset(&link_status, 0, sizeof(link_status));
2849 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2850 if (ret != I40E_SUCCESS) {
2851 PMD_DRV_LOG(ERR, "Failed to get link status information\n");
2852 goto write_reg; /* Disable flow control */
2855 an_info = hw->phy.link_info.an_info;
2856 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2857 PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
2858 ret = I40E_ERR_NOT_READY;
2859 goto write_reg; /* Disable flow control */
2862 * If link auto negotiation is enabled, flow control needs to
2863 * be configured according to it
2865 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2866 case I40E_LINK_PAUSE_RXTX:
2869 hw->fc.current_mode = I40E_FC_FULL;
2871 case I40E_AQ_LINK_PAUSE_RX:
2873 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2875 case I40E_AQ_LINK_PAUSE_TX:
2877 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2880 hw->fc.current_mode = I40E_FC_NONE;
2885 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2886 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2887 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2888 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2889 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2890 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2897 i40e_pf_setup(struct i40e_pf *pf)
2899 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2900 struct i40e_filter_control_settings settings;
2901 struct rte_eth_dev_data *dev_data = pf->dev_data;
2902 struct i40e_vsi *vsi;
2905 /* Clear all stats counters */
2906 pf->offset_loaded = FALSE;
2907 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2908 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2910 ret = i40e_pf_get_switch_config(pf);
2911 if (ret != I40E_SUCCESS) {
2912 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2917 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2919 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2920 return I40E_ERR_NOT_READY;
2923 dev_data->nb_rx_queues = vsi->nb_qps;
2924 dev_data->nb_tx_queues = vsi->nb_qps;
2926 /* Configure filter control */
2927 memset(&settings, 0, sizeof(settings));
2928 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2929 /* Enable ethtype and macvlan filters */
2930 settings.enable_ethtype = TRUE;
2931 settings.enable_macvlan = TRUE;
2932 ret = i40e_set_filter_control(hw, &settings);
2934 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2937 /* Update flow control according to the auto negotiation */
2938 i40e_update_flow_control(hw);
2940 return I40E_SUCCESS;
2944 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2950 * Set or clear TX Queue Disable flags,
2951 * as required by hardware.
2953 i40e_pre_tx_queue_cfg(hw, q_idx, on);
2954 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
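/*
 * Note on the handshake below: QENA_REQ is the enable bit requested by
 * software and QENA_STAT is the state reported by hardware. The loop polls
 * until the two bits agree, i.e. any previous enable/disable request has
 * completed, before issuing a new request.
 */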
2956 /* Wait until the request is finished */
2957 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2958 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2959 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2960 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2961 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2967 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2968 return I40E_SUCCESS; /* already on, skip next steps */
2970 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
2971 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2973 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2974 return I40E_SUCCESS; /* already off, skip next steps */
2975 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2977 /* Write the register */
2978 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2979 /* Check the result */
2980 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2981 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2982 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2984 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2985 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2988 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2989 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2993 /* Check whether it timed out */
2994 if (j >= I40E_CHK_Q_ENA_COUNT) {
2995 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
2996 (on ? "enable" : "disable"), q_idx);
2997 return I40E_ERR_TIMEOUT;
3000 return I40E_SUCCESS;
3003 /* Switch on or off the tx queues */
3005 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3007 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3008 struct i40e_tx_queue *txq;
3009 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3013 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3014 txq = dev_data->tx_queues[i];
3015 /* Don't operate the queue if it is not configured or
3016 * if it is to be started individually per queue */
3017 if (!txq->q_set || (on && txq->start_tx_per_q))
3020 ret = i40e_dev_tx_queue_start(dev, i);
3022 ret = i40e_dev_tx_queue_stop(dev, i);
3023 if ( ret != I40E_SUCCESS)
3027 return I40E_SUCCESS;
3031 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3036 /* Wait until the request is finished */
3037 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3038 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3039 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3040 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3041 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3046 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3047 return I40E_SUCCESS; /* Already on, skip next steps */
3048 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3050 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3051 return I40E_SUCCESS; /* Already off, skip next steps */
3052 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3055 /* Write the register */
3056 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3057 /* Check the result */
3058 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3059 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3060 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3062 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3063 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3066 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3067 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3072 /* Check whether it timed out */
3073 if (j >= I40E_CHK_Q_ENA_COUNT) {
3074 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
3075 (on ? "enable" : "disable"), q_idx);
3076 return I40E_ERR_TIMEOUT;
3079 return I40E_SUCCESS;
3081 /* Switch on or off the rx queues */
3083 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3085 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3086 struct i40e_rx_queue *rxq;
3087 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3091 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3092 rxq = dev_data->rx_queues[i];
3093 /* Don't operate the queue if it is not configured or
3094 * if it is to be started individually per queue */
3095 if (!rxq->q_set || (on && rxq->start_rx_per_q))
3098 ret = i40e_dev_rx_queue_start(dev, i);
3100 ret = i40e_dev_rx_queue_stop(dev, i);
3101 if (ret != I40E_SUCCESS)
3105 return I40E_SUCCESS;
3108 /* Switch on or off all the rx/tx queues */
3110 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3115 /* enable rx queues before enabling tx queues */
3116 ret = i40e_vsi_switch_rx_queues(vsi, on);
3118 PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
3121 ret = i40e_vsi_switch_tx_queues(vsi, on);
3123 /* Stop tx queues before stopping rx queues */
3124 ret = i40e_vsi_switch_tx_queues(vsi, on);
3126 PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
3129 ret = i40e_vsi_switch_rx_queues(vsi, on);
3135 /* Initialize VSI for TX */
3137 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3139 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3140 struct rte_eth_dev_data *data = pf->dev_data;
3142 uint32_t ret = I40E_SUCCESS;
3144 for (i = 0; i < data->nb_tx_queues; i++) {
3145 ret = i40e_tx_queue_init(data->tx_queues[i]);
3146 if (ret != I40E_SUCCESS)
3153 /* Initialize VSI for RX */
3155 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3157 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3158 struct rte_eth_dev_data *data = pf->dev_data;
3159 int ret = I40E_SUCCESS;
3162 i40e_pf_config_mq_rx(pf);
3163 for (i = 0; i < data->nb_rx_queues; i++) {
3164 ret = i40e_rx_queue_init(data->rx_queues[i]);
3165 if (ret != I40E_SUCCESS) {
3166 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3167 "initialization\n");
3175 /* Initialize VSI */
3177 i40e_vsi_init(struct i40e_vsi *vsi)
3181 err = i40e_vsi_tx_init(vsi);
3183 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
3186 err = i40e_vsi_rx_init(vsi);
3188 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
3196 i40e_stat_update_32(struct i40e_hw *hw,
3204 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3208 if (new_data >= *offset)
3209 *stat = (uint64_t)(new_data - *offset);
3211 *stat = (uint64_t)((new_data +
3212 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3216 i40e_stat_update_48(struct i40e_hw *hw,
3225 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3226 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3227 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3232 if (new_data >= *offset)
3233 *stat = new_data - *offset;
3235 *stat = (uint64_t)((new_data +
3236 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3238 *stat &= I40E_48_BIT_MASK;
3243 i40e_pf_disable_irq0(struct i40e_hw *hw)
3245 /* Disable all interrupt types */
3246 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3247 I40E_WRITE_FLUSH(hw);
3252 i40e_pf_enable_irq0(struct i40e_hw *hw)
3254 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3255 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3256 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3257 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3258 I40E_WRITE_FLUSH(hw);
3262 i40e_pf_config_irq0(struct i40e_hw *hw)
3266 /* read pending request and disable first */
3267 i40e_pf_disable_irq0(hw);
3269 * Enable all interrupt error options to detect possible errors;
3270 * other informative interrupts are ignored
3272 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3273 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3274 I40E_PFINT_ICR0_ENA_GRST_MASK |
3275 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3276 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3277 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3278 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3279 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3281 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3282 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3283 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3285 /* Do not link any queues with IRQ0 */
3286 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3287 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3291 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3293 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3294 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3297 uint32_t index, offset, val;
3302 * Try to find which VF triggered a reset; use the absolute VF id to access
3303 * since the register is a global register.
3305 for (i = 0; i < pf->vf_num; i++) {
3306 abs_vf_id = hw->func_caps.vf_base_id + i;
3307 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3308 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3309 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3310 /* VFR event occurred */
3311 if (val & (0x1 << offset)) {
3314 /* Clear the event first */
3315 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3317 PMD_DRV_LOG(INFO, "VF %u reset occured\n", abs_vf_id);
3319 * Only notify a VF reset event occured,
3320 * don't trigger another SW reset
3322 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3323 if (ret != I40E_SUCCESS)
3324 PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
3330 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3332 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3333 struct i40e_arq_event_info info;
3334 uint16_t pending, opcode;
3337 info.msg_size = I40E_AQ_BUF_SZ;
3338 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3339 if (!info.msg_buf) {
3340 PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
3346 ret = i40e_clean_arq_element(hw, &info, &pending);
3348 if (ret != I40E_SUCCESS) {
3349 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3350 "aq_err: %u\n", hw->aq.asq_last_status);
3353 opcode = rte_le_to_cpu_16(info.desc.opcode);
3356 case i40e_aqc_opc_send_msg_to_pf:
3357 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3358 i40e_pf_host_handle_vf_msg(dev,
3359 rte_le_to_cpu_16(info.desc.retval),
3360 rte_le_to_cpu_32(info.desc.cookie_high),
3361 rte_le_to_cpu_32(info.desc.cookie_low),
3366 PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
3370 /* Reset the buffer after processing one */
3371 info.msg_size = I40E_AQ_BUF_SZ;
3373 rte_free(info.msg_buf);
3377 * Interrupt handler triggered by NIC for handling
3378 * specific interrupt.
3381 * Pointer to interrupt handle.
3383 * The address of the parameter (struct rte_eth_dev *) registered before.
3389 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3392 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3393 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3394 uint32_t cause, enable;
3396 i40e_pf_disable_irq0(hw);
3398 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3399 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3401 /* Shared IRQ case, return */
3402 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3403 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3404 "no INT event to process\n", hw->pf_id);
3408 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3409 PMD_DRV_LOG(INFO, "INT:Link status changed\n");
3410 i40e_dev_link_update(dev, 0);
3413 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3414 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
3416 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3417 PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
3419 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3420 PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
3422 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3423 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured\n");
3425 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3426 PMD_DRV_LOG(INFO, "INT:HMC error occured\n");
3428 /* Add processing func to deal with VF reset event */
3429 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3430 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3431 i40e_dev_handle_vfr_event(dev);
3433 /* Find admin queue event */
3434 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3435 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3436 i40e_dev_handle_aq_msg(dev);
3440 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3441 /* Re-enable interrupt from device side */
3442 i40e_pf_enable_irq0(hw);
3443 /* Re-enable interrupt from host side */
3444 rte_intr_enable(&(dev->pci_dev->intr_handle));
3448 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3449 struct i40e_macvlan_filter *filter,
3452 int ele_num, ele_buff_size;
3453 int num, actual_num, i;
3454 int ret = I40E_SUCCESS;
3455 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3456 struct i40e_aqc_add_macvlan_element_data *req_list;
3458 if (filter == NULL || total == 0)
3459 return I40E_ERR_PARAM;
3460 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3461 ele_buff_size = hw->aq.asq_buf_size;
3463 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3464 if (req_list == NULL) {
3465 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3466 return I40E_ERR_NO_MEMORY;
3471 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3472 memset(req_list, 0, ele_buff_size);
3474 for (i = 0; i < actual_num; i++) {
3475 (void)rte_memcpy(req_list[i].mac_addr,
3476 &filter[num + i].macaddr, ETH_ADDR_LEN);
3477 req_list[i].vlan_tag =
3478 rte_cpu_to_le_16(filter[num + i].vlan_id);
3479 req_list[i].flags = rte_cpu_to_le_16(\
3480 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3481 req_list[i].queue_number = 0;
3484 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3486 if (ret != I40E_SUCCESS) {
3487 PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
3491 } while (num < total);
3499 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3500 struct i40e_macvlan_filter *filter,
3503 int ele_num, ele_buff_size;
3504 int num, actual_num, i;
3505 int ret = I40E_SUCCESS;
3506 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3507 struct i40e_aqc_remove_macvlan_element_data *req_list;
3509 if (filter == NULL || total == 0)
3510 return I40E_ERR_PARAM;
3512 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3513 ele_buff_size = hw->aq.asq_buf_size;
3515 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3516 if (req_list == NULL) {
3517 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3518 return I40E_ERR_NO_MEMORY;
3523 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3524 memset(req_list, 0, ele_buff_size);
3526 for (i = 0; i < actual_num; i++) {
3527 (void)rte_memcpy(req_list[i].mac_addr,
3528 &filter[num + i].macaddr, ETH_ADDR_LEN);
3529 req_list[i].vlan_tag =
3530 rte_cpu_to_le_16(filter[num + i].vlan_id);
3531 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3534 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3536 if (ret != I40E_SUCCESS) {
3537 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
3541 } while (num < total);
3548 /* Find a specific MAC filter */
3549 static struct i40e_mac_filter *
3550 i40e_find_mac_filter(struct i40e_vsi *vsi,
3551 struct ether_addr *macaddr)
3553 struct i40e_mac_filter *f;
3555 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3556 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3564 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3567 uint32_t vid_idx, vid_bit;
3569 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3570 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
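/*
 * Example: vlan_id = 100 -> vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, so bit 4 of vfta[3] indicates
 * whether VLAN 100 is configured.
 */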
3572 if (vsi->vfta[vid_idx] & vid_bit)
3579 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3580 uint16_t vlan_id, bool on)
3582 uint32_t vid_idx, vid_bit;
3584 #define UINT32_BIT_MASK 0x1F
3585 #define VALID_VLAN_BIT_MASK 0xFFF
3586 /* The VFTA is an array of 32-bit elements, each holding 32 VLAN bits. Find the
3587 * element first, then find the bit within it
3589 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3591 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3594 vsi->vfta[vid_idx] |= vid_bit;
3596 vsi->vfta[vid_idx] &= ~vid_bit;
3600 * Find all VLANs configured for a specific MAC address;
3601 * fill mv_f with the MAC/VLAN pairs actually found.
3604 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3605 struct i40e_macvlan_filter *mv_f,
3606 int num, struct ether_addr *addr)
3612 * i40e_find_vlan_filter() is not used here, to decrease the loop time,
3613 * although the code looks more complex.
3615 if (num < vsi->vlan_num)
3616 return I40E_ERR_PARAM;
3619 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3621 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3622 if (vsi->vfta[j] & (1 << k)) {
3624 PMD_DRV_LOG(ERR, "vlan number "
3626 return I40E_ERR_PARAM;
3628 (void)rte_memcpy(&mv_f[i].macaddr,
3629 addr, ETH_ADDR_LEN);
3631 j * I40E_UINT32_BIT_SIZE + k;
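/*
 * The VLAN id is reconstructed from its VFTA position; e.g. bit 4 of
 * vfta[3] maps back to 3 * 32 + 4 = 100, matching the encoding used in
 * i40e_set_vlan_filter().
 */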
3637 return I40E_SUCCESS;
3641 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3642 struct i40e_macvlan_filter *mv_f,
3647 struct i40e_mac_filter *f;
3649 if (num < vsi->mac_num)
3650 return I40E_ERR_PARAM;
3652 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3654 PMD_DRV_LOG(ERR, "buffer number not match\n");
3655 return I40E_ERR_PARAM;
3657 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3658 mv_f[i].vlan_id = vlan;
3662 return I40E_SUCCESS;
3666 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3669 struct i40e_mac_filter *f;
3670 struct i40e_macvlan_filter *mv_f;
3671 int ret = I40E_SUCCESS;
3673 if (vsi == NULL || vsi->mac_num == 0)
3674 return I40E_ERR_PARAM;
3676 /* Case that no vlan is set */
3677 if (vsi->vlan_num == 0)
3680 num = vsi->mac_num * vsi->vlan_num;
3682 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3684 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3685 return I40E_ERR_NO_MEMORY;
3689 if (vsi->vlan_num == 0) {
3690 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3691 (void)rte_memcpy(&mv_f[i].macaddr,
3692 &f->macaddr, ETH_ADDR_LEN);
3693 mv_f[i].vlan_id = 0;
3697 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3698 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3699 vsi->vlan_num, &f->macaddr);
3700 if (ret != I40E_SUCCESS)
3706 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3714 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3716 struct i40e_macvlan_filter *mv_f;
3718 int ret = I40E_SUCCESS;
3720 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3721 return I40E_ERR_PARAM;
3723 /* If it's already set, just return */
3724 if (i40e_find_vlan_filter(vsi,vlan))
3725 return I40E_SUCCESS;
3727 mac_num = vsi->mac_num;
3730 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3731 return I40E_ERR_PARAM;
3734 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3737 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3738 return I40E_ERR_NO_MEMORY;
3741 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3743 if (ret != I40E_SUCCESS)
3746 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3748 if (ret != I40E_SUCCESS)
3751 i40e_set_vlan_filter(vsi, vlan, 1);
3761 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3763 struct i40e_macvlan_filter *mv_f;
3765 int ret = I40E_SUCCESS;
3768 * Vlan 0 is the generic filter for untagged packets
3769 * and can't be removed.
3771 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3772 return I40E_ERR_PARAM;
3774 /* If it can't be found, just return */
3775 if (!i40e_find_vlan_filter(vsi, vlan))
3776 return I40E_ERR_PARAM;
3778 mac_num = vsi->mac_num;
3781 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3782 return I40E_ERR_PARAM;
3785 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3788 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3789 return I40E_ERR_NO_MEMORY;
3792 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3794 if (ret != I40E_SUCCESS)
3797 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3799 if (ret != I40E_SUCCESS)
3802 /* This is the last VLAN to remove, replace all MAC filters with VLAN 0 */
3803 if (vsi->vlan_num == 1) {
3804 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3805 if (ret != I40E_SUCCESS)
3808 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3809 if (ret != I40E_SUCCESS)
3813 i40e_set_vlan_filter(vsi, vlan, 0);
3823 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3825 struct i40e_mac_filter *f;
3826 struct i40e_macvlan_filter *mv_f;
3828 int ret = I40E_SUCCESS;
3830 /* If it's an add and we've already configured it, return */
3831 f = i40e_find_mac_filter(vsi, addr);
3833 return I40E_SUCCESS;
3836 * If vlan_num is 0, this is the first time a MAC is added;
3837 * set the filter bit for vlan_id 0.
3839 if (vsi->vlan_num == 0) {
3840 i40e_set_vlan_filter(vsi, 0, 1);
3844 vlan_num = vsi->vlan_num;
3846 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3848 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3849 return I40E_ERR_NO_MEMORY;
3852 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3853 if (ret != I40E_SUCCESS)
3856 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3857 if (ret != I40E_SUCCESS)
3860 /* Add the mac addr into mac list */
3861 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3863 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3864 ret = I40E_ERR_NO_MEMORY;
3867 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3868 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3879 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3881 struct i40e_mac_filter *f;
3882 struct i40e_macvlan_filter *mv_f;
3884 int ret = I40E_SUCCESS;
3886 /* Can't find it, return an error */
3887 f = i40e_find_mac_filter(vsi, addr);
3889 return I40E_ERR_PARAM;
3891 vlan_num = vsi->vlan_num;
3892 if (vlan_num == 0) {
3893 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
3894 return I40E_ERR_PARAM;
3896 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3898 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3899 return I40E_ERR_NO_MEMORY;
3902 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3903 if (ret != I40E_SUCCESS)
3906 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3907 if (ret != I40E_SUCCESS)
3910 /* Remove the MAC addr from the MAC list */
3911 TAILQ_REMOVE(&vsi->mac_list, f, next);
3921 /* Configure hash enable flags for RSS */
3923 i40e_config_hena(uint64_t flags)
3930 if (flags & ETH_RSS_NONF_IPV4_UDP)
3931 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3932 if (flags & ETH_RSS_NONF_IPV4_TCP)
3933 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3934 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3935 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3936 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3937 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3938 if (flags & ETH_RSS_FRAG_IPV4)
3939 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3940 if (flags & ETH_RSS_NONF_IPV6_UDP)
3941 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3942 if (flags & ETH_RSS_NONF_IPV6_TCP)
3943 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3944 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3945 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3946 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3947 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3948 if (flags & ETH_RSS_FRAG_IPV6)
3949 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3950 if (flags & ETH_RSS_L2_PAYLOAD)
3951 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3956 /* Parse the hash enable flags */
3958 i40e_parse_hena(uint64_t flags)
3960 uint64_t rss_hf = 0;
3965 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3966 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3967 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3968 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3969 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3970 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3971 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3972 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3973 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3974 rss_hf |= ETH_RSS_FRAG_IPV4;
3975 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3976 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3977 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3978 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3979 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3980 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3981 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3982 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3983 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3984 rss_hf |= ETH_RSS_FRAG_IPV6;
3985 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3986 rss_hf |= ETH_RSS_L2_PAYLOAD;
3993 i40e_pf_disable_rss(struct i40e_pf *pf)
3995 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3998 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3999 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4000 hena &= ~I40E_RSS_HENA_ALL;
4001 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4002 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4003 I40E_WRITE_FLUSH(hw);
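/*
 * The 64-bit hash-enable value spans two 32-bit registers: PFQF_HENA(0)
 * holds bits 0-31 and PFQF_HENA(1) holds bits 32-63, which is why the value
 * is read and written in two halves here and in i40e_hw_rss_hash_set().
 */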
4007 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4010 uint8_t hash_key_len;
4015 hash_key = (uint32_t *)(rss_conf->rss_key);
4016 hash_key_len = rss_conf->rss_key_len;
4017 if (hash_key != NULL && hash_key_len >=
4018 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4019 /* Fill in RSS hash key */
4020 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4021 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4024 rss_hf = rss_conf->rss_hf;
4025 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4026 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4027 hena &= ~I40E_RSS_HENA_ALL;
4028 hena |= i40e_config_hena(rss_hf);
4029 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4030 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4031 I40E_WRITE_FLUSH(hw);
4037 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4038 struct rte_eth_rss_conf *rss_conf)
4040 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4041 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4044 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4045 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4046 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4047 if (rss_hf != 0) /* Enable RSS */
4049 return 0; /* Nothing to do */
4052 if (rss_hf == 0) /* Disable RSS */
4055 return i40e_hw_rss_hash_set(hw, rss_conf);
4059 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4060 struct rte_eth_rss_conf *rss_conf)
4062 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4063 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4067 if (hash_key != NULL) {
4068 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4069 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4070 rss_conf->rss_key_len = i * sizeof(uint32_t);
4072 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4073 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4074 rss_conf->rss_hf = i40e_parse_hena(hena);
4081 i40e_pf_config_rss(struct i40e_pf *pf)
4083 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4084 struct rte_eth_rss_conf rss_conf;
4085 uint32_t i, lut = 0;
4086 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
4088 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
4091 lut = (lut << 8) | (j & ((0x1 <<
4092 hw->func_caps.rss_table_entry_width) - 1));
4094 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
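/*
 * Each 32-bit PFQF_HLUT register packs four 8-bit LUT entries: lut
 * accumulates the queue index j (masked to rss_table_entry_width bits) and is
 * flushed to HLUT(i >> 2), presumably once every fourth iteration, spreading
 * the nb_rx_queues (rounded down to a power of two) across the RSS table.
 */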
4097 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
4098 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
4099 i40e_pf_disable_rss(pf);
4102 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
4103 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4104 /* Generate a random default hash key */
4105 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4106 rss_key_default[i] = (uint32_t)rte_rand();
4107 rss_conf.rss_key = (uint8_t *)rss_key_default;
4108 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
4112 return i40e_hw_rss_hash_set(hw, &rss_conf);
4116 i40e_pf_config_mq_rx(struct i40e_pf *pf)
4118 if (!pf->dev_data->sriov.active) {
4119 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
4121 i40e_pf_config_rss(pf);
4124 i40e_pf_disable_rss(pf);