/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "i40e/i40e_register_x710_int.h"
#include "i40e/i40e_prototype.h"
#include "i40e/i40e_adminq_cmd.h"
#include "i40e/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Bit shift and mask */
#define I40E_16_BIT_SHIFT 16
#define I40E_16_BIT_MASK  0xFFFF
#define I40E_32_BIT_SHIFT 32
#define I40E_32_BIT_MASK  0xFFFFFFFF
#define I40E_48_BIT_SHIFT 48
#define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
#define I40E_RSS_OFFLOAD_ALL ( \
	ETH_RSS_NONF_IPV4_UDP | \
	ETH_RSS_NONF_IPV4_TCP | \
	ETH_RSS_NONF_IPV4_SCTP | \
	ETH_RSS_NONF_IPV4_OTHER | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONF_IPV6_UDP | \
	ETH_RSS_NONF_IPV6_TCP | \
	ETH_RSS_NONF_IPV6_SCTP | \
	ETH_RSS_NONF_IPV6_OTHER | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_L2_PAYLOAD)
/* All bits of RSS hash enable */
#define I40E_RSS_HENA_ALL ( \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
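
/*
 * For reference, a 64-bit hash-enable word such as I40E_RSS_HENA_ALL is
 * split across two 32-bit PFQF_HENA registers when programmed; a minimal
 * sketch, assuming the I40E_WRITE_REG helper and register layout used
 * elsewhere in this driver:
 */
static inline void
i40e_write_hena_sketch(struct i40e_hw *hw, uint64_t hena)
{
	/* Low half to HENA(0), high half to HENA(1) */
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0),
			(uint32_t)(hena & I40E_32_BIT_MASK));
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1),
			(uint32_t)(hena >> I40E_32_BIT_SHIFT));
}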
static int eth_i40e_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
			     struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue, int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
			     uint32_t index,
			     uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta *reta_conf);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta *reta_conf);

static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_vsi_init(struct i40e_vsi *vsi);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
		uint32_t hireg, uint32_t loreg, bool offset_loaded,
		uint64_t *offset, uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			      uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			       uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static int i40e_pf_disable_all_queues(struct i40e_hw *hw);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num, uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
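
/*
 * The stat helpers declared above are defined later in this file; as a
 * reference, a minimal sketch of the 48-bit variant, assuming the usual
 * HI/LO register split and the wrap handling implied by the
 * offset_loaded/offset arguments (illustrative only):
 */
static inline void
i40e_stat_update_48_sketch(struct i40e_hw *hw, uint32_t hireg,
			   uint32_t loreg, bool offset_loaded,
			   uint64_t *offset, uint64_t *stat)
{
	uint64_t new_data;

	/* A 48-bit counter spans a LO register plus the low 16 bits
	 * of a HI register */
	new_data = (uint64_t)I40E_READ_REG(hw, loreg);
	new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
			I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;

	/* The first read after a stats reset snapshots the start point */
	if (!offset_loaded)
		*offset = new_data;

	/* Report the delta since the snapshot, allowing one 48-bit wrap */
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = new_data + ((uint64_t)1 << I40E_48_BIT_SHIFT)
				- *offset;
	*stat &= I40E_48_BIT_MASK;
}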
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];

static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure = i40e_dev_configure,
	.dev_start = i40e_dev_start,
	.dev_stop = i40e_dev_stop,
	.dev_close = i40e_dev_close,
	.promiscuous_enable = i40e_dev_promiscuous_enable,
	.promiscuous_disable = i40e_dev_promiscuous_disable,
	.allmulticast_enable = i40e_dev_allmulticast_enable,
	.allmulticast_disable = i40e_dev_allmulticast_disable,
	.link_update = i40e_dev_link_update,
	.stats_get = i40e_dev_stats_get,
	.stats_reset = i40e_dev_stats_reset,
	.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get = i40e_dev_info_get,
	.vlan_filter_set = i40e_vlan_filter_set,
	.vlan_tpid_set = i40e_vlan_tpid_set,
	.vlan_offload_set = i40e_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
	.vlan_pvid_set = i40e_vlan_pvid_set,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.dev_led_on = i40e_dev_led_on,
	.dev_led_off = i40e_dev_led_off,
	.flow_ctrl_set = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
	.mac_addr_add = i40e_macaddr_add,
	.mac_addr_remove = i40e_macaddr_remove,
	.reta_update = i40e_dev_rss_reta_update,
	.reta_query = i40e_dev_rss_reta_query,
	.rss_hash_update = i40e_dev_rss_hash_update,
	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
};
static struct eth_driver rte_i40e_pmd = {
	{
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};
/* Round n down to the largest power of 2 that is not greater than n */
static inline int
i40e_prev_power_of_2(int n)
{
	int p = n;

	--p;
	p |= p >> 1;
	p |= p >> 2;
	p |= p >> 4;
	p |= p >> 8;
	p |= p >> 16;
	if (p == (n - 1))
		return n;
	p >>= 1;

	return ++p;
}
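
/* e.g. i40e_prev_power_of_2(48) yields 32, while 64 is returned unchanged */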
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
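
/*
 * Both helpers above copy the link struct with a single 64-bit
 * compare-and-set; this relies on struct rte_eth_link packing into
 * exactly 8 bytes, so the raw uint64_t casts stay within bounds.
 */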
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}

static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
static int
eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
		  struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
		return 0;
	}

	pci_dev = dev->pci_dev;
	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
				"as address is NULL\n");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	/* Disable all queues before PF reset, as required */
	ret = i40e_pf_disable_all_queues(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to disable queues %u\n", ret);
		return ret;
	}

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
			"%02d.%02d.%02d eetrack %04x\n",
			hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
			hw->aq.api_maj_ver, hw->aq.api_min_ver,
			((hw->nvm.version >> 12) & 0xf),
			((hw->nvm.version >> 4) & 0xff),
			(hw->nvm.version & 0xf), hw->nvm.eetrack);

	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp\n");

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
				"for storing mac address");
		ret = -ENOMEM;
		goto err_get_mac_addr;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
		i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	return 0;

err_setup_pf_switch:
	rte_free(pf->main_vsi);
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	return i40e_dev_init_vlan(dev);
}
static void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	int i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), 0);
	} else {
		uint32_t reg;

		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}
	I40E_WRITE_FLUSH(hw);
}
static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return (uint16_t)(interval / 2);
}
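
/* e.g. the 32 us default interval is written to the ITR register as 16 */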
static void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
	uint32_t val;
	int i;

	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			(interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			((vsi->base_queue + i + 1) <<
			I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		/* The last queue terminates the linked list */
		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
			(vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), interval);

		/* Disable auto-mask when enabling any non-zero interrupt */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf includes IRQ0, which must be excluded */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			(vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(
				RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
		I40E_PFINT_DYN_CTLN_INTENA_MASK |
		I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		(0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
		(interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
}
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	/* Initialize VSI */
	ret = i40e_vsi_init(vsi);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init VSI\n");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(vsi);
	i40e_vsi_enable_queues_intr(vsi);

	/* Enable all queues which have been configured */
	ret = i40e_vsi_switch_queues(vsi, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI\n");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to set VSI broadcast\n");
	}

	return I40E_SUCCESS;

err_up:
	i40e_vsi_switch_queues(vsi, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	/* Disable all queues */
	i40e_vsi_switch_queues(vsi, FALSE);

	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(vsi);
	i40e_vsi_queues_unbind_intr(vsi);
}
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
			(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							true, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							false, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
						vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
}
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info\n");
		goto out;
	}

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Get all the statistics of a VSI */
static void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** VSI[%u] stats start *******************\n",
		vsi->vsi_id);
	printf("rx_bytes: %lu\n", nes->rx_bytes);
	printf("rx_unicast: %lu\n", nes->rx_unicast);
	printf("rx_multicast: %lu\n", nes->rx_multicast);
	printf("rx_broadcast: %lu\n", nes->rx_broadcast);
	printf("rx_discards: %lu\n", nes->rx_discards);
	printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
	printf("tx_bytes: %lu\n", nes->tx_bytes);
	printf("tx_unicast: %lu\n", nes->tx_unicast);
	printf("tx_multicast: %lu\n", nes->tx_multicast);
	printf("tx_broadcast: %lu\n", nes->tx_broadcast);
	printf("tx_discards: %lu\n", nes->tx_discards);
	printf("tx_errors: %lu\n", nes->tx_errors);
	printf("***************** VSI[%u] stats end *******************\n",
		vsi->vsi_id);
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
}
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
			    pf->offset_loaded, &os->eth.tx_discards,
			    &ns->eth.tx_discards);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
						ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
						ns->eth.tx_broadcast;
	stats->ibytes = ns->eth.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors;
	stats->imcasts = ns->eth.rx_multicast;

	/* Refresh the VSI-level statistics as well */
	i40e_update_vsi_stats(pf->main_vsi);
#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** PF stats start *******************\n");
	printf("rx_bytes: %lu\n", ns->eth.rx_bytes);
	printf("rx_unicast: %lu\n", ns->eth.rx_unicast);
	printf("rx_multicast: %lu\n", ns->eth.rx_multicast);
	printf("rx_broadcast: %lu\n", ns->eth.rx_broadcast);
	printf("rx_discards: %lu\n", ns->eth.rx_discards);
	printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
	printf("tx_bytes: %lu\n", ns->eth.tx_bytes);
	printf("tx_unicast: %lu\n", ns->eth.tx_unicast);
	printf("tx_multicast: %lu\n", ns->eth.tx_multicast);
	printf("tx_broadcast: %lu\n", ns->eth.tx_broadcast);
	printf("tx_discards: %lu\n", ns->eth.tx_discards);
	printf("tx_errors: %lu\n", ns->eth.tx_errors);

	printf("tx_dropped_link_down: %lu\n", ns->tx_dropped_link_down);
	printf("crc_errors: %lu\n", ns->crc_errors);
	printf("illegal_bytes: %lu\n", ns->illegal_bytes);
	printf("error_bytes: %lu\n", ns->error_bytes);
	printf("mac_local_faults: %lu\n", ns->mac_local_faults);
	printf("mac_remote_faults: %lu\n", ns->mac_remote_faults);
	printf("rx_length_errors: %lu\n", ns->rx_length_errors);
	printf("link_xon_rx: %lu\n", ns->link_xon_rx);
	printf("link_xoff_rx: %lu\n", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_rx[%u]: %lu\n",
				i, ns->priority_xon_rx[i]);
		printf("priority_xoff_rx[%u]: %lu\n",
				i, ns->priority_xoff_rx[i]);
	}
	printf("link_xon_tx: %lu\n", ns->link_xon_tx);
	printf("link_xoff_tx: %lu\n", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_tx[%u]: %lu\n",
				i, ns->priority_xon_tx[i]);
		printf("priority_xoff_tx[%u]: %lu\n",
				i, ns->priority_xoff_tx[i]);
		printf("priority_xon_2_xoff[%u]: %lu\n",
				i, ns->priority_xon_2_xoff[i]);
	}
	printf("rx_size_64: %lu\n", ns->rx_size_64);
	printf("rx_size_127: %lu\n", ns->rx_size_127);
	printf("rx_size_255: %lu\n", ns->rx_size_255);
	printf("rx_size_511: %lu\n", ns->rx_size_511);
	printf("rx_size_1023: %lu\n", ns->rx_size_1023);
	printf("rx_size_1522: %lu\n", ns->rx_size_1522);
	printf("rx_size_big: %lu\n", ns->rx_size_big);
	printf("rx_undersize: %lu\n", ns->rx_undersize);
	printf("rx_fragments: %lu\n", ns->rx_fragments);
	printf("rx_oversize: %lu\n", ns->rx_oversize);
	printf("rx_jabber: %lu\n", ns->rx_jabber);
	printf("tx_size_64: %lu\n", ns->tx_size_64);
	printf("tx_size_127: %lu\n", ns->tx_size_127);
	printf("tx_size_255: %lu\n", ns->tx_size_255);
	printf("tx_size_511: %lu\n", ns->tx_size_511);
	printf("tx_size_1023: %lu\n", ns->tx_size_1023);
	printf("tx_size_1522: %lu\n", ns->tx_size_1522);
	printf("tx_size_big: %lu\n", ns->tx_size_big);
	printf("mac_short_packet_dropped: %lu\n",
			ns->mac_short_packet_dropped);
	printf("checksum_error: %lu\n", ns->checksum_error);
	printf("***************** PF stats end ********************\n");
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
}
/* Reset the statistics */
static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* It results in reloading the start point of each counter */
	pf->offset_loaded = false;
}

static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;
}
static int
i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	PMD_INIT_FUNC_TRACE();

	if (on)
		return i40e_vsi_add_vlan(vsi, vlan_id);
	else
		return i40e_vsi_delete_vlan(vsi, vlan_id);
}

static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
{
	PMD_INIT_FUNC_TRACE();
}
static void
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			i40e_vsi_config_vlan_stripping(vsi, TRUE);
		else
			i40e_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			i40e_vsi_config_double_vlan(vsi, TRUE);
		else
			i40e_vsi_config_double_vlan(vsi, FALSE);
	}
}

static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
static int
i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_vsi_vlan_pvid_info info;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on)
		info.config.pvid = pvid;
	else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	return i40e_vsi_vlan_pvid_set(vsi, &info);
}
static int
i40e_dev_led_on(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode == 0)
		i40e_led_set(hw, 0xf, true); /* 0xf means led always on */

	return 0;
}

static int
i40e_dev_led_off(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode != 0)
		i40e_led_set(hw, 0, false);

	return 0;
}

static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}

static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
/* Add a MAC address, and update filters */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __attribute__((unused)) uint32_t index,
		 __attribute__((unused)) uint32_t pool)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct ether_addr old_mac;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
		return;
	}

	if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");
		return;
	}

	/* Write mac address */
	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					mac_addr->addr_bytes, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
		return;
	}

	(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
	(void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
			ETHER_ADDR_LEN);

	ret = i40e_vsi_add_mac(vsi, mac_addr);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
		return;
	}

	ether_addr_copy(mac_addr, &pf->dev_addr);
	i40e_vsi_delete_mac(vsi, &old_mac);
}
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct ether_addr *macaddr;
	int ret;
	struct i40e_hw *hw =
		I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (index >= vsi->max_macaddrs)
		return;

	macaddr = &(data->mac_addrs[index]);
	if (!is_valid_assigned_ether_addr(macaddr))
		return;

	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					hw->mac.perm_addr, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
		return;
	}

	(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);

	ret = i40e_vsi_delete_mac(vsi, macaddr);
	if (ret != I40E_SUCCESS)
		return;

	/* Clear device address as it has been removed */
	if (is_same_ether_addr(&(pf->dev_addr), macaddr))
		memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
}
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut, l;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		if (mask == 0xF)
			l = 0;
		else
			l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));

		for (j = 0, lut = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				lut |= reta_conf->reta[i + j] << (8 * j);
			else
				lut |= l & (0xFF << (8 * j));
		}
		I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	return 0;
}
static int
i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
		for (j = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				reta_conf->reta[i + j] =
					(uint8_t)((lut >> (8 * j)) & 0xFF);
		}
	}

	return 0;
}
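
/*
 * Each 32-bit PFQF_HLUT register packs four 8-bit RETA entries, which is
 * why both functions above work in groups of four: e.g. a mask of 0x2 for
 * a group rewrites only byte 1 of the HLUT word and preserves the other
 * three bytes from the value read back.
 */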
/**
 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum i40e_status_code
i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			struct i40e_dma_mem *mem,
			u64 size,
			u32 alignment)
{
	static uint64_t id = 0;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return I40E_ERR_PARAM;

	id++;
	rte_snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
	mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
	if (!mz)
		return I40E_ERR_NO_MEMORY;

	mem->id = id;
	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;

	return I40E_SUCCESS;
}
/**
 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum i40e_status_code
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		    struct i40e_dma_mem *mem)
{
	if (!mem || !mem->va)
		return I40E_ERR_PARAM;

	mem->va = NULL;
	mem->pa = (u64)0;

	return I40E_SUCCESS;
}
/**
 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 **/
enum i40e_status_code
i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			 struct i40e_virt_mem *mem,
			 u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("i40e", size, 0);
	if (mem->va)
		return I40E_SUCCESS;

	return I40E_ERR_NO_MEMORY;
}
/**
 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: pointer to mem struct to free
 **/
enum i40e_status_code
i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		     struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return I40E_SUCCESS;
}
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}

void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}

void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}

void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
	/* Nothing to do: rte_spinlock_t requires no teardown */
}
/*
 * Get the hardware capabilities, which will be parsed
 * and saved into struct i40e_hw.
 */
static int
i40e_get_cap(struct i40e_hw *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	uint16_t len, size = 0;
	int ret;

	/* Calculate a buffer large enough to hold the response data */
	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
						I40E_MAX_CAP_ELE_NUM;
	buf = rte_zmalloc("i40e", len, 0);
	if (!buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
		return I40E_ERR_NO_MEMORY;
	}

	/* Get, parse the capabilities and save it to hw */
	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
			i40e_aqc_opc_list_func_capabilities, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");

	/* Free the temporary buffer after being used */
	rte_free(buf);

	return ret;
}
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t sum_queues = 0, sum_vsis;

	/* First check if FW supports SR-IOV */
	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");
		return -EINVAL;
	}

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
	PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
	/* Allocate queues for pf */
	if (hw->func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS;
		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
		pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
	} else
		pf->lan_nb_qps = 1;
	sum_queues = pf->lan_nb_qps;
	/* Default VSI is not counted in */
	sum_vsis = 0;
	PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);

	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
			PMD_INIT_LOG(ERR, "Config VF number %u, "
				"max supported %u.\n", dev->pci_dev->max_vfs,
				hw->func_caps.num_vfs);
			return -EINVAL;
		}
		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
				"max support %u queues.\n", pf->vf_nb_qps,
				I40E_MAX_QP_NUM_PER_VF);
			return -EINVAL;
		}
		pf->vf_num = dev->pci_dev->max_vfs;
		sum_queues += pf->vf_nb_qps * pf->vf_num;
		sum_vsis += pf->vf_num;
		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
				pf->vf_num, pf->vf_nb_qps);
	} else
		pf->vf_num = 0;

	if (hw->func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ;
		pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
		sum_queues += pf->vmdq_nb_qps;
		sum_vsis += 1;
		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);
	}

	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
		/*
		 * Each flow director consumes one VSI and one queue,
		 * but this can't be calculated predictably here.
		 */
	}

	if (sum_vsis > pf->max_num_vsi ||
		sum_queues > hw->func_caps.num_rx_qp) {
		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
				pf->max_num_vsi, sum_vsis);
		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
				hw->func_caps.num_rx_qp, sum_queues);
		return -EINVAL;
	}

	/*
	 * Each VSI occupies at least one MSIX interrupt, plus IRQ0 for
	 * the miscellaneous interrupt cause.
	 */
	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
				sum_vsis, hw->func_caps.num_msix_vectors);
		return -EINVAL;
	}

	return I40E_SUCCESS;
}
static int
i40e_pf_get_switch_config(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_aqc_get_switch_config_resp *switch_config;
	struct i40e_aqc_switch_config_element_resp *element;
	uint16_t start_seid = 0, num_reported;
	int ret;

	switch_config = (struct i40e_aqc_get_switch_config_resp *)
			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
	if (!switch_config) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Get the switch configurations */
	ret = i40e_aq_get_switch_config(hw, switch_config,
		I40E_AQ_LARGE_BUF, &start_seid, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
		goto fail;
	}
	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
	if (num_reported != 1) { /* The number should be 1 */
		PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");
		ret = -EINVAL;
		goto fail;
	}

	/* Parse the switch configuration elements */
	element = &(switch_config->element[0]);
	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
	} else
		PMD_DRV_LOG(INFO, "Unknown element type\n");

fail:
	rte_free(switch_config);

	return ret;
}
static int
i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
		   uint32_t num)
{
	struct pool_entry *entry;

	if (pool == NULL || num == 0)
		return -EINVAL;

	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
	if (entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
				"resource pool\n");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element: one free range covering the whole pool */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static void
i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
{
	struct pool_entry *entry;

	if (pool == NULL)
		return;

	/* Pop-and-free to avoid touching entries after they are freed */
	while ((entry = LIST_FIRST(&pool->alloc_list)) != NULL) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	while ((entry = LIST_FIRST(&pool->free_list)) != NULL) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static int
i40e_res_pool_free(struct i40e_res_pool_info *pool,
		   uint32_t base)
{
	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
	uint32_t pool_offset, freed_len;
	int insert;

	if (pool == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter\n");
		return -EINVAL;
	}

	pool_offset = base - pool->base;
	/* Lookup in alloc list */
	LIST_FOREACH(entry, &pool->alloc_list, next) {
		if (entry->base == pool_offset) {
			valid_entry = entry;
			LIST_REMOVE(entry, next);
			break;
		}
	}

	/* Not found, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to find entry\n");
		return -EINVAL;
	}

	/* Remember the freed length before merging changes the entry */
	freed_len = valid_entry->len;

	/*
	 * Found it, move it to the free list and try to merge.
	 * In order to make merging easier, always keep it sorted by qbase.
	 * Find the adjacent prev and next entries.
	 */
	prev = next = NULL;
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->base > valid_entry->base) {
			next = entry;
			break;
		}
		prev = entry;
	}

	insert = 0;
	/* Try to merge with the next one */
	if (next != NULL) {
		/* Merge with next one */
		if (valid_entry->base + valid_entry->len == next->base) {
			next->base = valid_entry->base;
			next->len += valid_entry->len;
			rte_free(valid_entry);
			valid_entry = next;
			insert = 1;
		}
	}

	if (prev != NULL) {
		/* Merge with previous one */
		if (prev->base + prev->len == valid_entry->base) {
			prev->len += valid_entry->len;
			/* If it merged with the next one, remove that node */
			if (insert == 1) {
				LIST_REMOVE(valid_entry, next);
				rte_free(valid_entry);
			} else {
				rte_free(valid_entry);
				insert = 1;
			}
			valid_entry = prev;
		}
	}

	/* No entry to merge with; insert into the free list */
	if (insert == 0) {
		if (prev != NULL)
			LIST_INSERT_AFTER(prev, valid_entry, next);
		else if (next != NULL)
			LIST_INSERT_BEFORE(next, valid_entry, next);
		else /* It's an empty list, insert at the head */
			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
	}

	/* Account only the freed range, not the whole merged entry */
	pool->num_free += freed_len;
	pool->num_alloc -= freed_len;

	return 0;
}
static int
i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
		    uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (pool == NULL || num == 0) {
		PMD_DRV_LOG(ERR, "Invalid parameter\n");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
				num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in the free list for the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit can't be beaten */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (valid_entry == NULL || valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry satisfies the request, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "No valid entry found\n");
		return -ENOMEM;
	}

	/*
	 * If the entry has exactly the requested number of queues,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/*
		 * The entry has more queues than requested: create a new
		 * entry for the alloc list and shrink the base and length
		 * of the entry left on the free list.
		 */
		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
					"resource pool\n");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return (valid_entry->base + pool->base);
}
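
/*
 * A minimal usage sketch of the resource pool API above (the qp_pool and
 * msix_pool setup in eth_i40e_dev_init follows the same pattern); the
 * base and count values here are illustrative only:
 */
static __rte_unused void
i40e_res_pool_usage_sketch(void)
{
	struct i40e_res_pool_info pool;
	int base;

	i40e_res_pool_init(&pool, 0, 128);	 /* manage queues 0..127 */
	base = i40e_res_pool_alloc(&pool, 16);	 /* carve out 16 queues */
	if (base >= 0)
		i40e_res_pool_free(&pool, base); /* return them; adjacent
						  * free ranges get merged */
	i40e_res_pool_destroy(&pool);
}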
/**
 * bitmap_is_subset - Check whether src2 is a subset of src1
 **/
static inline int
bitmap_is_subset(uint8_t src1, uint8_t src2)
{
	return !((src1 ^ src2) & src2);
}
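
/*
 * e.g. bitmap_is_subset(0x7, 0x5) is non-zero since every bit of 0x5 is
 * set in 0x7, while bitmap_is_subset(0x5, 0x2) is zero.
 */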
static int
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* If DCB is not supported, only default TC is supported */
	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
		PMD_DRV_LOG(ERR, "DCB is not enabled, "
				"only TC0 is supported\n");
		return -EINVAL;
	}

	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
			"HW support 0x%x\n", hw->func_caps.enabled_tcmap,
			enabled_tcmap);
		return -EINVAL;
	}
	return I40E_SUCCESS;
}
2023 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2024 struct i40e_vsi_vlan_pvid_info *info)
2027 struct i40e_vsi_context ctxt;
2028 uint8_t vlan_flags = 0;
2031 if (vsi == NULL || info == NULL) {
2032 PMD_DRV_LOG(ERR, "Invalid parameters\n");
2033 return I40E_ERR_PARAM;
2037 vsi->info.pvid = info->config.pvid;
2039 * If insert pvid is enabled, only tagged pkts are
2040 * allowed to be sent out.
2042 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2043 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2046 if (info->config.reject.tagged == 0)
2047 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2049 if (info->config.reject.untagged == 0)
2050 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2052 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2053 I40E_AQ_VSI_PVLAN_MODE_MASK);
2054 vsi->info.port_vlan_flags |= vlan_flags;
2055 vsi->info.valid_sections =
2056 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2057 memset(&ctxt, 0, sizeof(ctxt));
2058 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2059 ctxt.seid = vsi->seid;
2061 hw = I40E_VSI_TO_HW(vsi);
2062 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2063 if (ret != I40E_SUCCESS)
2064 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2070 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2072 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2074 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2076 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2077 if (ret != I40E_SUCCESS)
2081 PMD_DRV_LOG(ERR, "seid not valid\n");
2085 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2086 tc_bw_data.tc_valid_bits = enabled_tcmap;
2087 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2088 tc_bw_data.tc_bw_credits[i] =
2089 (enabled_tcmap & (1 << i)) ? 1 : 0;
2091 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2092 if (ret != I40E_SUCCESS) {
2093 PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
2097 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2098 sizeof(vsi->info.qs_handle));
2099 return I40E_SUCCESS;
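/*
 * Example: with only TC0 enabled (enabled_tcmap = 0x1), the loop above
 * sets tc_bw_credits[0] = 1 and leaves the other TCs at 0, i.e. all
 * bandwidth credits go to TC0.
 */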
2103 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2104 struct i40e_aqc_vsi_properties_data *info,
2105 uint8_t enabled_tcmap)
2107 int ret, total_tc = 0, i;
2108 uint16_t qpnum_per_tc, bsf, qp_idx;
2110 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2111 if (ret != I40E_SUCCESS)
2114 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2115 if (enabled_tcmap & (1 << i))
2117 vsi->enabled_tc = enabled_tcmap;
2119 /* Number of queues per enabled TC */
2120 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2121 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2122 bsf = rte_bsf32(qpnum_per_tc);
2124 /* Adjust the queue number to actual queues that can be applied */
2125 vsi->nb_qps = qpnum_per_tc * total_tc;
2128 * Configure TC and queue mapping parameters: for an enabled TC,
2129 * allocate qpnum_per_tc queues to that traffic class. Disabled TCs
2130 * are served by the default queue.
2133 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2134 if (vsi->enabled_tc & (1 << i)) {
2135 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2136 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2137 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2138 qp_idx += qpnum_per_tc;
2140 info->tc_mapping[i] = 0;
2143 /* Associate queue number with VSI */
2144 if (vsi->type == I40E_VSI_SRIOV) {
2145 info->mapping_flags |=
2146 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2147 for (i = 0; i < vsi->nb_qps; i++)
2148 info->queue_mapping[i] =
2149 rte_cpu_to_le_16(vsi->base_queue + i);
2151 info->mapping_flags |=
2152 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2153 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2155 info->valid_sections =
2156 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2158 return I40E_SUCCESS;
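/*
 * Worked example (illustration only): with vsi->nb_qps = 10 and two
 * enabled TCs, qpnum_per_tc = i40e_prev_power_of_2(10 / 2) = 4 and
 * bsf = rte_bsf32(4) = 2, so vsi->nb_qps is adjusted down to 8. TC0
 * covers queue offsets 0-3 and TC1 covers 4-7; each tc_mapping[i]
 * packs the queue offset and the size exponent (bsf) using the
 * QUE_OFFSET/QUE_NUMBER shifts above.
 */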
2162 i40e_veb_release(struct i40e_veb *veb)
2164 struct i40e_vsi *vsi;
2167 if (veb == NULL || veb->associate_vsi == NULL)
2170 if (!TAILQ_EMPTY(&veb->head)) {
2171 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
2175 vsi = veb->associate_vsi;
2176 hw = I40E_VSI_TO_HW(vsi);
2178 vsi->uplink_seid = veb->uplink_seid;
2179 i40e_aq_delete_element(hw, veb->seid, NULL);
2182 return I40E_SUCCESS;
2186 static struct i40e_veb *
2187 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2189 struct i40e_veb *veb;
2193 if (pf == NULL || vsi == NULL) {
2194 PMD_DRV_LOG(ERR, "veb setup failed, "
2195 "associated VSI shouldn't be NULL\n");
2198 hw = I40E_PF_TO_HW(pf);
2200 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2202 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
2206 veb->associate_vsi = vsi;
2207 TAILQ_INIT(&veb->head);
2208 veb->uplink_seid = vsi->uplink_seid;
2210 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2211 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2213 if (ret != I40E_SUCCESS) {
2214 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
2215 hw->aq.asq_last_status);
2219 /* get statistics index */
2220 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2221 &veb->stats_idx, NULL, NULL, NULL);
2222 if (ret != I40E_SUCCESS) {
2223 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d\n",
2224 hw->aq.asq_last_status);
2228 /* Get VEB bandwidth, to be implemented */
2229 /* Now the associated vsi is bound to the VEB; set its uplink to this VEB */
2230 vsi->uplink_seid = veb->seid;
2239 i40e_vsi_release(struct i40e_vsi *vsi)
2243 struct i40e_vsi_list *vsi_list;
2245 struct i40e_mac_filter *f;
2248 return I40E_SUCCESS;
2250 pf = I40E_VSI_TO_PF(vsi);
2251 hw = I40E_VSI_TO_HW(vsi);
2253 /* VSI has children attached, release the children first */
2255 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2256 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2258 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2260 i40e_veb_release(vsi->veb);
2263 /* Remove all macvlan filters of the VSI */
2264 i40e_vsi_remove_all_macvlan_filter(vsi);
2265 TAILQ_FOREACH(f, &vsi->mac_list, next)
2268 if (vsi->type != I40E_VSI_MAIN) {
2269 /* Remove vsi from parent's sibling list */
2270 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2271 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
2272 return I40E_ERR_PARAM;
2274 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2275 &vsi->sib_vsi_list, list);
2277 /* Remove all switch elements of the VSI */
2278 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2279 if (ret != I40E_SUCCESS)
2280 PMD_DRV_LOG(ERR, "Failed to delete element\n");
2282 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2284 if (vsi->type != I40E_VSI_SRIOV)
2285 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2288 return I40E_SUCCESS;
2292 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2294 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2295 struct i40e_aqc_remove_macvlan_element_data def_filter;
2298 if (vsi->type != I40E_VSI_MAIN)
2299 return I40E_ERR_CONFIG;
2300 memset(&def_filter, 0, sizeof(def_filter));
2301 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2303 def_filter.vlan_tag = 0;
2304 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2305 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2306 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2307 if (ret != I40E_SUCCESS) {
2308 struct i40e_mac_filter *f;
2310 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2311 "macvlan filter\n");
2312 /* The permanent mac needs to be added to the mac list */
2313 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2315 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
2316 return I40E_ERR_NO_MEMORY;
2318 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2320 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2326 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2330 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2332 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2333 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2334 struct i40e_hw *hw = &vsi->adapter->hw;
2338 memset(&bw_config, 0, sizeof(bw_config));
2339 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2340 if (ret != I40E_SUCCESS) {
2341 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
2342 "configuration %u\n", hw->aq.asq_last_status);
2346 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2347 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2348 &ets_sla_config, NULL);
2349 if (ret != I40E_SUCCESS) {
2350 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2351 "configuration %u\n", hw->aq.asq_last_status);
2355 /* Don't store the info yet, just print it out */
2356 PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
2357 PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
2358 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2359 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
2360 ets_sla_config.share_credits[i]);
2361 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
2362 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2363 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u\n", i,
2364 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2373 i40e_vsi_setup(struct i40e_pf *pf,
2374 enum i40e_vsi_type type,
2375 struct i40e_vsi *uplink_vsi,
2376 uint16_t user_param)
2378 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2379 struct i40e_vsi *vsi;
2381 struct i40e_vsi_context ctxt;
2382 struct ether_addr broadcast =
2383 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2385 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2386 PMD_DRV_LOG(ERR, "VSI setup failed, "
2387 "uplink VSI shouldn't be NULL\n");
2391 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2392 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2393 "uplink VSI should be NULL\n");
2397 /* If the uplink vsi hasn't set up a VEB, create one first */
2398 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2399 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2401 if (uplink_vsi->veb == NULL) {
2402 PMD_DRV_LOG(ERR, "VEB setup failed\n");
2407 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2409 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
2412 TAILQ_INIT(&vsi->mac_list);
2414 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2415 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2416 vsi->parent_vsi = uplink_vsi;
2417 vsi->user_param = user_param;
2418 /* Allocate queues */
2419 switch (vsi->type) {
2420 case I40E_VSI_MAIN:
2421 vsi->nb_qps = pf->lan_nb_qps;
2423 case I40E_VSI_SRIOV:
2424 vsi->nb_qps = pf->vf_nb_qps;
2429 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2431 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2435 vsi->base_queue = ret;
2437 /* VF has MSIX interrupt in VF range, don't allocate here */
2438 if (type != I40E_VSI_SRIOV) {
2439 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2441 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2442 goto fail_queue_alloc;
2444 vsi->msix_intr = ret;
2448 if (type == I40E_VSI_MAIN) {
2449 /* For main VSI, no need to add since it's the default one */
2450 vsi->uplink_seid = pf->mac_seid;
2451 vsi->seid = pf->main_vsi_seid;
2452 /* Bind queues with specific MSIX interrupt */
2454 * At least two interrupts are needed: one for the misc cause,
2455 * which is enabled from the OS side, and another for the queues,
2456 * bound to the interrupt from the device side only.
2459 /* Get default VSI parameters from hardware */
2460 memset(&ctxt, 0, sizeof(ctxt));
2461 ctxt.seid = vsi->seid;
2462 ctxt.pf_num = hw->pf_id;
2463 ctxt.uplink_seid = vsi->uplink_seid;
2465 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2466 if (ret != I40E_SUCCESS) {
2467 PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
2468 goto fail_msix_alloc;
2470 (void)rte_memcpy(&vsi->info, &ctxt.info,
2471 sizeof(struct i40e_aqc_vsi_properties_data));
2472 vsi->vsi_id = ctxt.vsi_number;
2473 vsi->info.valid_sections = 0;
2475 /* Configure TC, enable TC0 only */
2476 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2478 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
2479 goto fail_msix_alloc;
2482 /* TC, queue mapping */
2483 memset(&ctxt, 0, sizeof(ctxt));
2484 vsi->info.valid_sections |=
2485 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2486 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2487 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2488 (void)rte_memcpy(&ctxt.info, &vsi->info,
2489 sizeof(struct i40e_aqc_vsi_properties_data));
2490 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2491 I40E_DEFAULT_TCMAP);
2492 if (ret != I40E_SUCCESS) {
2493 PMD_DRV_LOG(ERR, "Failed to configure "
2494 "TC queue mapping\n");
2495 goto fail_msix_alloc;
2497 ctxt.seid = vsi->seid;
2498 ctxt.pf_num = hw->pf_id;
2499 ctxt.uplink_seid = vsi->uplink_seid;
2502 /* Update VSI parameters */
2503 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2504 if (ret != I40E_SUCCESS) {
2505 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2506 goto fail_msix_alloc;
2509 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2510 sizeof(vsi->info.tc_mapping));
2511 (void)rte_memcpy(&vsi->info.queue_mapping,
2512 &ctxt.info.queue_mapping,
2513 sizeof(vsi->info.queue_mapping));
2514 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2515 vsi->info.valid_sections = 0;
2517 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2521 * Updating the default filter settings is necessary to prevent
2522 * reception of tagged packets.
2523 * Some old firmware configurations load a default macvlan
2524 * filter which accepts both tagged and untagged packets.
2525 * The update replaces it with a normal filter if needed.
2526 * For NVM 4.2.2 or later, the update is not needed anymore.
2527 * Firmware with a correct configuration loads the default
2528 * macvlan filter which is expected and cannot be removed.
2530 i40e_update_default_filter_setting(vsi);
2531 } else if (type == I40E_VSI_SRIOV) {
2532 memset(&ctxt, 0, sizeof(ctxt));
2534 * For other VSIs, the uplink_seid equals the uplink VSI's
2535 * uplink_seid since they share the same VEB
2537 vsi->uplink_seid = uplink_vsi->uplink_seid;
2538 ctxt.pf_num = hw->pf_id;
2539 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2540 ctxt.uplink_seid = vsi->uplink_seid;
2541 ctxt.connection_type = 0x1;
2542 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2544 /* Configure switch ID */
2545 ctxt.info.valid_sections |=
2546 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2547 ctxt.info.switch_id =
2548 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2549 /* Configure port/vlan */
2550 ctxt.info.valid_sections |=
2551 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2552 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2553 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2554 I40E_DEFAULT_TCMAP);
2555 if (ret != I40E_SUCCESS) {
2556 PMD_DRV_LOG(ERR, "Failed to configure "
2557 "TC queue mapping\n");
2558 goto fail_msix_alloc;
2560 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2561 ctxt.info.valid_sections |=
2562 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2564 * Since the VSI is not created yet, only configure the parameters;
2565 * the VSI will be added below.
2569 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet\n");
2570 goto fail_msix_alloc;
2573 if (vsi->type != I40E_VSI_MAIN) {
2574 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2576 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
2577 hw->aq.asq_last_status);
2578 goto fail_msix_alloc;
2580 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2581 vsi->info.valid_sections = 0;
2582 vsi->seid = ctxt.seid;
2583 vsi->vsi_id = ctxt.vsi_number;
2584 vsi->sib_vsi_list.vsi = vsi;
2585 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2586 &vsi->sib_vsi_list, list);
2589 /* MAC/VLAN configuration */
2590 ret = i40e_vsi_add_mac(vsi, &broadcast);
2591 if (ret != I40E_SUCCESS) {
2592 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
2593 goto fail_msix_alloc;
2596 /* Get VSI BW information */
2597 i40e_vsi_dump_bw_config(vsi);
2600 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2602 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2608 /* Configure vlan stripping on or off */
2610 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2612 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2613 struct i40e_vsi_context ctxt;
2615 int ret = I40E_SUCCESS;
2617 /* Check if it is already on or off */
2618 if (vsi->info.valid_sections &
2619 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2621 if ((vsi->info.port_vlan_flags &
2622 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2623 return 0; /* already on */
2625 if ((vsi->info.port_vlan_flags &
2626 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2627 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2628 return 0; /* already off */
2633 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2635 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2636 vsi->info.valid_sections =
2637 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2638 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2639 vsi->info.port_vlan_flags |= vlan_flags;
2640 ctxt.seid = vsi->seid;
2641 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2642 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2644 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
2645 on ? "enable" : "disable");
2651 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2653 struct rte_eth_dev_data *data = dev->data;
2656 /* Apply vlan offload setting */
2657 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2659 /* Apply double-vlan setting, not implemented yet */
2661 /* Apply pvid setting */
2662 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2663 data->dev_conf.txmode.hw_vlan_insert_pvid);
2665 PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
2671 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2673 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2675 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2679 i40e_update_flow_control(struct i40e_hw *hw)
2681 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2682 struct i40e_link_status link_status;
2683 uint32_t rxfc = 0, txfc = 0, reg;
2687 memset(&link_status, 0, sizeof(link_status));
2688 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2689 if (ret != I40E_SUCCESS) {
2690 PMD_DRV_LOG(ERR, "Failed to get link status information\n");
2691 goto write_reg; /* Disable flow control */
2694 an_info = hw->phy.link_info.an_info;
2695 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2696 PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
2697 ret = I40E_ERR_NOT_READY;
2698 goto write_reg; /* Disable flow control */
2701 * If link auto negotiation is enabled, flow control needs to
2702 * be configured according to it
2704 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2705 case I40E_LINK_PAUSE_RXTX:
2708 hw->fc.current_mode = I40E_FC_FULL;
2710 case I40E_AQ_LINK_PAUSE_RX:
2712 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2714 case I40E_AQ_LINK_PAUSE_TX:
2716 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2719 hw->fc.current_mode = I40E_FC_NONE;
2724 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2725 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2726 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2727 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2728 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2729 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2736 i40e_pf_setup(struct i40e_pf *pf)
2738 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2739 struct i40e_filter_control_settings settings;
2740 struct rte_eth_dev_data *dev_data = pf->dev_data;
2741 struct i40e_vsi *vsi;
2744 /* Clear all stats counters */
2745 pf->offset_loaded = FALSE;
2746 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2747 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2749 ret = i40e_pf_get_switch_config(pf);
2750 if (ret != I40E_SUCCESS) {
2751 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2756 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2758 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2759 return I40E_ERR_NOT_READY;
2762 dev_data->nb_rx_queues = vsi->nb_qps;
2763 dev_data->nb_tx_queues = vsi->nb_qps;
2765 /* Configure filter control */
2766 memset(&settings, 0, sizeof(settings));
2767 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2768 /* Enable ethtype and macvlan filters */
2769 settings.enable_ethtype = TRUE;
2770 settings.enable_macvlan = TRUE;
2771 ret = i40e_set_filter_control(hw, &settings);
2773 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2776 /* Update flow control according to the auto negotiation */
2777 i40e_update_flow_control(hw);
2779 return I40E_SUCCESS;
2783 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2788 /* Wait until the request is finished */
2789 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2790 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2791 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2792 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2793 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2799 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2800 return I40E_SUCCESS; /* already on, skip next steps */
2801 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2803 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2804 return I40E_SUCCESS; /* already off, skip next steps */
2805 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2807 /* Write the register */
2808 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2809 /* Check the result */
2810 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2811 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2812 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2814 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2815 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2818 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2819 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2823 /* Check if it timed out */
2824 if (j >= I40E_CHK_Q_ENA_COUNT) {
2825 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
2826 (on ? "enable" : "disable"), q_idx);
2827 return I40E_ERR_TIMEOUT;
2829 return I40E_SUCCESS;
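/*
 * The handshake above uses two bits of QTX_ENA: software writes
 * QENA_REQ and hardware acknowledges by updating QENA_STAT. The
 * function first polls until REQ == STAT (no request in flight), then
 * flips REQ, then polls until STAT matches the requested state. A
 * caller only needs the final result, e.g. (sketch):
 *
 *	if (i40e_switch_tx_queue(hw, pf_q, TRUE) != I40E_SUCCESS)
 *		PMD_DRV_LOG(ERR, "enable of tx queue %u timed out\n", pf_q);
 */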
2831 /* Switch on or off the tx queues */
2833 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
2835 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2836 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2837 struct i40e_tx_queue *txq;
2841 pf_q = vsi->base_queue;
2842 for (i = 0; i < dev_data->nb_tx_queues; i++, pf_q++) {
2843 txq = dev_data->tx_queues[i];
2845 continue; /* Queue not configured */
2846 ret = i40e_switch_tx_queue(hw, pf_q, on);
2847 if (ret != I40E_SUCCESS)
2851 return I40E_SUCCESS;
2855 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2860 /* Wait until the request is finished */
2861 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2862 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2863 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2864 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2865 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
2870 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2871 return I40E_SUCCESS; /* Already on, skip next steps */
2872 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2874 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2875 return I40E_SUCCESS; /* Already off, skip next steps */
2876 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2879 /* Write the register */
2880 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
2881 /* Check the result */
2882 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2883 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2884 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2886 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2887 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
2890 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2891 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2896 /* Check if it timed out */
2897 if (j >= I40E_CHK_Q_ENA_COUNT) {
2898 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
2899 (on ? "enable" : "disable"), q_idx);
2900 return I40E_ERR_TIMEOUT;
2903 return I40E_SUCCESS;
2905 /* Switch on or off the rx queues */
2907 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
2909 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2910 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2911 struct i40e_rx_queue *rxq;
2915 pf_q = vsi->base_queue;
2916 for (i = 0; i < dev_data->nb_rx_queues; i++, pf_q++) {
2917 rxq = dev_data->rx_queues[i];
2919 continue; /* Queue not configured */
2920 ret = i40e_switch_rx_queue(hw, pf_q, on);
2921 if (ret != I40E_SUCCESS)
2925 return I40E_SUCCESS;
2928 /* Switch on or off all the rx/tx queues */
2930 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
2935 /* enable rx queues before enabling tx queues */
2936 ret = i40e_vsi_switch_rx_queues(vsi, on);
2938 PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
2941 ret = i40e_vsi_switch_tx_queues(vsi, on);
2943 /* Stop tx queues before stopping rx queues */
2944 ret = i40e_vsi_switch_tx_queues(vsi, on);
2946 PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
2949 ret = i40e_vsi_switch_rx_queues(vsi, on);
2955 /* Initialize VSI for TX */
2957 i40e_vsi_tx_init(struct i40e_vsi *vsi)
2959 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2960 struct rte_eth_dev_data *data = pf->dev_data;
2962 uint32_t ret = I40E_SUCCESS;
2964 for (i = 0; i < data->nb_tx_queues; i++) {
2965 ret = i40e_tx_queue_init(data->tx_queues[i]);
2966 if (ret != I40E_SUCCESS)
2973 /* Initialize VSI for RX */
2975 i40e_vsi_rx_init(struct i40e_vsi *vsi)
2977 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2978 struct rte_eth_dev_data *data = pf->dev_data;
2979 int ret = I40E_SUCCESS;
2982 i40e_pf_config_mq_rx(pf);
2983 for (i = 0; i < data->nb_rx_queues; i++) {
2984 ret = i40e_rx_queue_init(data->rx_queues[i]);
2985 if (ret != I40E_SUCCESS) {
2986 PMD_DRV_LOG(ERR, "Failed to do RX queue "
2987 "initialization\n");
2995 /* Initialize VSI */
2997 i40e_vsi_init(struct i40e_vsi *vsi)
3001 err = i40e_vsi_tx_init(vsi);
3003 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
3006 err = i40e_vsi_rx_init(vsi);
3008 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
3016 i40e_stat_update_32(struct i40e_hw *hw,
3024 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3028 if (new_data >= *offset)
3029 *stat = (uint64_t)(new_data - *offset);
3031 *stat = (uint64_t)((new_data +
3032 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3036 i40e_stat_update_48(struct i40e_hw *hw,
3045 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3046 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3047 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3052 if (new_data >= *offset)
3053 *stat = new_data - *offset;
3055 *stat = (uint64_t)((new_data +
3056 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3058 *stat &= I40E_48_BIT_MASK;
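/*
 * Rollover example: the counter is 48 bits wide, so it wraps at 2^48.
 * If the saved offset is 0xFFFFFFFFFFF0 and the register now reads
 * 0x10, new_data < *offset and the delta becomes
 * (0x10 + 2^48) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the
 * snapshot rather than a huge bogus value; the final mask keeps the
 * result within 48 bits. i40e_stat_update_32() above handles 32-bit
 * counters the same way with a 2^32 correction.
 */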
3063 i40e_pf_disable_irq0(struct i40e_hw *hw)
3065 /* Disable all interrupt types */
3066 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3067 I40E_WRITE_FLUSH(hw);
3072 i40e_pf_enable_irq0(struct i40e_hw *hw)
3074 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3075 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3076 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3077 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3078 I40E_WRITE_FLUSH(hw);
3082 i40e_pf_config_irq0(struct i40e_hw *hw)
3086 /* read pending request and disable first */
3087 i40e_pf_disable_irq0(hw);
3089 * Enable all interrupt error options to detect possible errors,
3090 * other informational interrupts are ignored
3092 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3093 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3094 I40E_PFINT_ICR0_ENA_GRST_MASK |
3095 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3096 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3097 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3098 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3099 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3101 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3102 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3103 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3105 /* Link no queues with irq0 */
3106 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3107 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3111 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3113 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3114 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3117 uint32_t index, offset, val;
3122 * Try to find which VF triggered a reset; use the absolute VF id to
3123 * access the register since it is a global register.
3125 for (i = 0; i < pf->vf_num; i++) {
3126 abs_vf_id = hw->func_caps.vf_base_id + i;
3127 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3128 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3129 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3130 /* VFR event occurred */
3131 if (val & (0x1 << offset)) {
3134 /* Clear the event first */
3135 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3137 PMD_DRV_LOG(INFO, "VF %u reset occurred\n", abs_vf_id);
3139 * Only notify that a VF reset event occurred,
3140 * don't trigger another SW reset
3142 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3143 if (ret != I40E_SUCCESS)
3144 PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
3150 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3152 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3153 struct i40e_arq_event_info info;
3154 uint16_t pending, opcode;
3157 info.msg_size = I40E_AQ_BUF_SZ;
3158 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3159 if (!info.msg_buf) {
3160 PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
3166 ret = i40e_clean_arq_element(hw, &info, &pending);
3168 if (ret != I40E_SUCCESS) {
3169 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3170 "aq_err: %u\n", hw->aq.asq_last_status);
3173 opcode = rte_le_to_cpu_16(info.desc.opcode);
3176 case i40e_aqc_opc_send_msg_to_pf:
3177 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3178 i40e_pf_host_handle_vf_msg(dev,
3179 rte_le_to_cpu_16(info.desc.retval),
3180 rte_le_to_cpu_32(info.desc.cookie_high),
3181 rte_le_to_cpu_32(info.desc.cookie_low),
3186 PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
3190 /* Reset the buffer after processing one */
3191 info.msg_size = I40E_AQ_BUF_SZ;
3193 rte_free(info.msg_buf);
3197 * Interrupt handler triggered by NIC for handling
3198 * specific interrupts.
3201 * Pointer to interrupt handle.
3203 * The address of the parameter (struct rte_eth_dev *) registered before.
3209 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3212 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3213 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3214 uint32_t cause, enable;
3216 i40e_pf_disable_irq0(hw);
3218 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3219 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3221 /* Shared IRQ case, return */
3222 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3223 PMD_DRV_LOG(INFO, "Port%d INT0: shared IRQ case, "
3224 "no INT event to process\n", hw->pf_id);
3228 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3229 PMD_DRV_LOG(INFO, "INT:Link status changed\n");
3230 i40e_dev_link_update(dev, 0);
3233 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3234 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
3236 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3237 PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
3239 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3240 PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
3242 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3243 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occurred\n");
3245 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3246 PMD_DRV_LOG(INFO, "INT:HMC error occurred\n");
3248 /* Add processing func to deal with VF reset event */
3249 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3250 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3251 i40e_dev_handle_vfr_event(dev);
3253 /* Find admin queue event */
3254 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3255 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3256 i40e_dev_handle_aq_msg(dev);
3260 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3261 /* Re-enable interrupt from device side */
3262 i40e_pf_enable_irq0(hw);
3263 /* Re-enable interrupt from host side */
3264 rte_intr_enable(&(dev->pci_dev->intr_handle));
3268 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3269 struct i40e_macvlan_filter *filter,
3272 int ele_num, ele_buff_size;
3273 int num, actual_num, i;
3274 int ret = I40E_SUCCESS;
3275 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3276 struct i40e_aqc_add_macvlan_element_data *req_list;
3278 if (filter == NULL || total == 0)
3279 return I40E_ERR_PARAM;
3280 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3281 ele_buff_size = hw->aq.asq_buf_size;
3283 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3284 if (req_list == NULL) {
3285 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
3286 return I40E_ERR_NO_MEMORY;
3291 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3292 memset(req_list, 0, ele_buff_size);
3294 for (i = 0; i < actual_num; i++) {
3295 (void)rte_memcpy(req_list[i].mac_addr,
3296 &filter[num + i].macaddr, ETH_ADDR_LEN);
3297 req_list[i].vlan_tag =
3298 rte_cpu_to_le_16(filter[num + i].vlan_id);
3299 req_list[i].flags = rte_cpu_to_le_16(
3300 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3301 req_list[i].queue_number = 0;
3304 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3306 if (ret != I40E_SUCCESS) {
3307 PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
3311 } while (num < total);
3319 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3320 struct i40e_macvlan_filter *filter,
3323 int ele_num, ele_buff_size;
3324 int num, actual_num, i;
3325 int ret = I40E_SUCCESS;
3326 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3327 struct i40e_aqc_remove_macvlan_element_data *req_list;
3329 if (filter == NULL || total == 0)
3330 return I40E_ERR_PARAM;
3332 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3333 ele_buff_size = hw->aq.asq_buf_size;
3335 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3336 if (req_list == NULL) {
3337 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
3338 return I40E_ERR_NO_MEMORY;
3343 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3344 memset(req_list, 0, ele_buff_size);
3346 for (i = 0; i < actual_num; i++) {
3347 (void)rte_memcpy(req_list[i].mac_addr,
3348 &filter[num + i].macaddr, ETH_ADDR_LEN);
3349 req_list[i].vlan_tag =
3350 rte_cpu_to_le_16(filter[num + i].vlan_id);
3351 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3354 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3356 if (ret != I40E_SUCCESS) {
3357 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
3361 } while (num < total);
3368 /* Find a specific MAC filter */
3369 static struct i40e_mac_filter *
3370 i40e_find_mac_filter(struct i40e_vsi *vsi,
3371 struct ether_addr *macaddr)
3373 struct i40e_mac_filter *f;
3375 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3376 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3384 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3387 uint32_t vid_idx, vid_bit;
3389 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3390 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3392 if (vsi->vfta[vid_idx] & vid_bit)
3399 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3400 uint16_t vlan_id, bool on)
3402 uint32_t vid_idx, vid_bit;
3404 #define UINT32_BIT_MASK 0x1F
3405 #define VALID_VLAN_BIT_MASK 0xFFF
3406 /* VFTA is an array of 32-bit elements, each holding 32 vlan bits. Find
3407 * the element first, then the bit within it.
3409 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3411 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3414 vsi->vfta[vid_idx] |= vid_bit;
3416 vsi->vfta[vid_idx] &= ~vid_bit;
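/*
 * Worked example: for vlan_id = 1034, vid_idx = (1034 & 0xFFF) >> 5
 * = 32 and vid_bit = 1 << (1034 & 0x1F) = 1 << 10, so VLAN 1034 is
 * tracked in bit 10 of vsi->vfta[32]. i40e_find_vlan_filter() above
 * computes the same slot as (vlan_id >> 5) & 0x7F, which is
 * equivalent for valid 12-bit vlan ids.
 */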
3420 * Find all vlan options for a specific mac addr,
3421 * returning the vlans actually found.
3424 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3425 struct i40e_macvlan_filter *mv_f,
3426 int num, struct ether_addr *addr)
3432 * i40e_find_vlan_filter is not used here to reduce the loop time,
3433 * although that makes the code look more complex.
3435 if (num < vsi->vlan_num)
3436 return I40E_ERR_PARAM;
3439 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3441 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3442 if (vsi->vfta[j] & (1 << k)) {
3444 PMD_DRV_LOG(ERR, "vlan number "
3446 return I40E_ERR_PARAM;
3448 (void)rte_memcpy(&mv_f[i].macaddr,
3449 addr, ETH_ADDR_LEN);
3451 j * I40E_UINT32_BIT_SIZE + k;
3457 return I40E_SUCCESS;
3461 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3462 struct i40e_macvlan_filter *mv_f,
3467 struct i40e_mac_filter *f;
3469 if (num < vsi->mac_num)
3470 return I40E_ERR_PARAM;
3472 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3474 PMD_DRV_LOG(ERR, "buffer number doesn't match\n");
3475 return I40E_ERR_PARAM;
3477 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3478 mv_f[i].vlan_id = vlan;
3482 return I40E_SUCCESS;
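/*
 * The two helpers above expand filters as a cross product: a VSI with
 * M mac addresses and V vlans needs M * V hardware (mac, vlan) entries
 * (see num = vsi->mac_num * vsi->vlan_num below). For example, 2 macs
 * and 3 vlans produce 6 pairs; i40e_find_all_vlan_for_mac() fixes the
 * mac and walks the VFTA, while i40e_find_all_mac_for_vlan() fixes the
 * vlan and walks the mac list.
 */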
3486 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3489 struct i40e_mac_filter *f;
3490 struct i40e_macvlan_filter *mv_f;
3491 int ret = I40E_SUCCESS;
3493 if (vsi == NULL || vsi->mac_num == 0)
3494 return I40E_ERR_PARAM;
3496 /* Case where no vlan is set */
3497 if (vsi->vlan_num == 0)
3500 num = vsi->mac_num * vsi->vlan_num;
3502 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3504 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3505 return I40E_ERR_NO_MEMORY;
3509 if (vsi->vlan_num == 0) {
3510 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3511 (void)rte_memcpy(&mv_f[i].macaddr,
3512 &f->macaddr, ETH_ADDR_LEN);
3513 mv_f[i].vlan_id = 0;
3517 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3518 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
3519 vsi->vlan_num, &f->macaddr);
3520 if (ret != I40E_SUCCESS)
3526 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3534 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3536 struct i40e_macvlan_filter *mv_f;
3538 int ret = I40E_SUCCESS;
3540 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3541 return I40E_ERR_PARAM;
3543 /* If it's already set, just return */
3544 if (i40e_find_vlan_filter(vsi, vlan))
3545 return I40E_SUCCESS;
3547 mac_num = vsi->mac_num;
3550 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3551 return I40E_ERR_PARAM;
3554 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3557 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3558 return I40E_ERR_NO_MEMORY;
3561 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3563 if (ret != I40E_SUCCESS)
3566 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3568 if (ret != I40E_SUCCESS)
3571 i40e_set_vlan_filter(vsi, vlan, 1);
3581 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3583 struct i40e_macvlan_filter *mv_f;
3585 int ret = I40E_SUCCESS;
3588 * Vlan 0 is the generic filter for untagged packets
3589 * and can't be removed.
3591 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3592 return I40E_ERR_PARAM;
3594 /* If it can't be found, just return */
3595 if (!i40e_find_vlan_filter(vsi, vlan))
3596 return I40E_ERR_PARAM;
3598 mac_num = vsi->mac_num;
3601 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3602 return I40E_ERR_PARAM;
3605 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3608 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3609 return I40E_ERR_NO_MEMORY;
3612 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3614 if (ret != I40E_SUCCESS)
3617 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3619 if (ret != I40E_SUCCESS)
3622 /* This is the last vlan to remove; replace all mac filters with vlan 0 */
3623 if (vsi->vlan_num == 1) {
3624 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3625 if (ret != I40E_SUCCESS)
3628 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3629 if (ret != I40E_SUCCESS)
3633 i40e_set_vlan_filter(vsi, vlan, 0);
3643 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3645 struct i40e_mac_filter *f;
3646 struct i40e_macvlan_filter *mv_f;
3648 int ret = I40E_SUCCESS;
3650 /* If it has already been configured, just return */
3651 f = i40e_find_mac_filter(vsi, addr);
3653 return I40E_SUCCESS;
3656 * If vlan_num is 0, this is the first time to add a mac;
3657 * set the mask for vlan_id 0.
3659 if (vsi->vlan_num == 0) {
3660 i40e_set_vlan_filter(vsi, 0, 1);
3664 vlan_num = vsi->vlan_num;
3666 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3668 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3669 return I40E_ERR_NO_MEMORY;
3672 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3673 if (ret != I40E_SUCCESS)
3676 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3677 if (ret != I40E_SUCCESS)
3680 /* Add the mac addr to the mac list */
3681 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3683 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3684 ret = I40E_ERR_NO_MEMORY;
3687 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3688 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3699 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3701 struct i40e_mac_filter *f;
3702 struct i40e_macvlan_filter *mv_f;
3704 int ret = I40E_SUCCESS;
3706 /* Can't find it, return an error */
3707 f = i40e_find_mac_filter(vsi, addr);
3709 return I40E_ERR_PARAM;
3711 vlan_num = vsi->vlan_num;
3712 if (vlan_num == 0) {
3713 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
3714 return I40E_ERR_PARAM;
3716 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3718 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3719 return I40E_ERR_NO_MEMORY;
3722 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3723 if (ret != I40E_SUCCESS)
3726 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3727 if (ret != I40E_SUCCESS)
3730 /* Remove the mac addr from the mac list */
3731 TAILQ_REMOVE(&vsi->mac_list, f, next);
3741 /* Configure hash enable flags for RSS */
3743 i40e_config_hena(uint64_t flags)
3750 if (flags & ETH_RSS_NONF_IPV4_UDP)
3751 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3752 if (flags & ETH_RSS_NONF_IPV4_TCP)
3753 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3754 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3755 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3756 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3757 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3758 if (flags & ETH_RSS_FRAG_IPV4)
3759 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3760 if (flags & ETH_RSS_NONF_IPV6_UDP)
3761 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3762 if (flags & ETH_RSS_NONF_IPV6_TCP)
3763 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3764 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3765 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3766 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3767 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3768 if (flags & ETH_RSS_FRAG_IPV6)
3769 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3770 if (flags & ETH_RSS_L2_PAYLOAD)
3771 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3776 /* Parse the hash enable flags */
3778 i40e_parse_hena(uint64_t flags)
3780 uint64_t rss_hf = 0;
3785 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3786 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3787 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3788 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3789 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3790 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3791 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3792 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3793 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3794 rss_hf |= ETH_RSS_FRAG_IPV4;
3795 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3796 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3797 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3798 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3799 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3800 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3801 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3802 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3803 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3804 rss_hf |= ETH_RSS_FRAG_IPV6;
3805 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3806 rss_hf |= ETH_RSS_L2_PAYLOAD;
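/*
 * Note: i40e_config_hena() and i40e_parse_hena() are inverse mappings
 * between the ETH_RSS_* flag space and the hardware PCTYPE bit space,
 * so for any flag set handled above,
 * i40e_parse_hena(i40e_config_hena(rss_hf)) == rss_hf. For example,
 * ETH_RSS_NONF_IPV4_TCP maps to
 * (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) and back.
 */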
3813 i40e_pf_disable_rss(struct i40e_pf *pf)
3815 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3818 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3819 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3820 hena &= ~I40E_RSS_HENA_ALL;
3821 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3822 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3823 I40E_WRITE_FLUSH(hw);
3827 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
3830 uint8_t hash_key_len;
3835 hash_key = (uint32_t *)(rss_conf->rss_key);
3836 hash_key_len = rss_conf->rss_key_len;
3837 if (hash_key != NULL && hash_key_len >=
3838 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3839 /* Fill in RSS hash key */
3840 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3841 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
3844 rss_hf = rss_conf->rss_hf;
3845 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3846 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3847 hena &= ~I40E_RSS_HENA_ALL;
3848 hena |= i40e_config_hena(rss_hf);
3849 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3850 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3851 I40E_WRITE_FLUSH(hw);
3857 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
3858 struct rte_eth_rss_conf *rss_conf)
3860 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3861 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
3864 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3865 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3866 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
3867 if (rss_hf != 0) /* Enable RSS */
3869 return 0; /* Nothing to do */
3872 if (rss_hf == 0) /* Disable RSS */
3875 return i40e_hw_rss_hash_set(hw, rss_conf);
3879 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3880 struct rte_eth_rss_conf *rss_conf)
3882 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3883 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
3887 if (hash_key != NULL) {
3888 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3889 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
3890 rss_conf->rss_key_len = i * sizeof(uint32_t);
3892 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3893 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3894 rss_conf->rss_hf = i40e_parse_hena(hena);
3901 i40e_pf_config_rss(struct i40e_pf *pf)
3903 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3904 struct rte_eth_rss_conf rss_conf;
3905 uint32_t i, lut = 0;
3906 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
3908 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
3911 lut = (lut << 8) | (j & ((0x1 <<
3912 hw->func_caps.rss_table_entry_width) - 1));
3914 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
3917 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
3918 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
3919 i40e_pf_disable_rss(pf);
3922 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
3923 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3924 /* Generate a random default hash key */
3925 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3926 rss_key_default[i] = (uint32_t)rte_rand();
3927 rss_conf.rss_key = (uint8_t *)rss_key_default;
3928 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3932 return i40e_hw_rss_hash_set(hw, &rss_conf);
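/*
 * LUT packing example (illustration only): each I40E_PFQF_HLUT
 * register holds four 8-bit entries, so the loop above shifts a new
 * entry into 'lut' on every iteration and i >> 2 selects the register
 * to write. With num = 4 rx queues the table cycles through entries
 * 0,1,2,3,0,1,..., spreading hash results round-robin across the
 * queues; num is first rounded down to a power of two by
 * i40e_prev_power_of_2().
 */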
3936 i40e_pf_config_mq_rx(struct i40e_pf *pf)
3938 if (!pf->dev_data->sriov.active) {
3939 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
3941 i40e_pf_config_rss(pf);
3944 i40e_pf_disable_rss(pf);
3953 i40e_disable_queue(struct i40e_hw *hw, uint16_t q_idx)
3958 /* Disable TX queue */
3959 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3960 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3961 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3962 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 0x1)))
3964 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3966 if (i >= I40E_CHK_Q_ENA_COUNT) {
3967 PMD_DRV_LOG(ERR, "Failed to disable "
3968 "tx queue[%u]\n", q_idx);
3969 return I40E_ERR_TIMEOUT;
3972 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3973 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3974 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3975 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3976 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3977 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3978 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3979 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3982 if (i >= I40E_CHK_Q_ENA_COUNT) {
3983 PMD_DRV_LOG(ERR, "Failed to disable "
3984 "tx queue[%u]\n", q_idx);
3985 return I40E_ERR_TIMEOUT;
3989 /* Disable RX queue */
3990 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3991 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3992 if (!(((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3993 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1)))
3995 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3997 if (i >= I40E_CHK_Q_ENA_COUNT) {
3998 PMD_DRV_LOG(ERR, "Failed to disable "
3999 "rx queue[%u]\n", q_idx);
4000 return I40E_ERR_TIMEOUT;
4003 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
4004 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4005 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
4006 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
4007 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
4008 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4009 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4010 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4013 if (i >= I40E_CHK_Q_ENA_COUNT) {
4014 PMD_DRV_LOG(ERR, "Failed to disable "
4015 "rx queue[%u]\n", q_idx);
4016 return I40E_ERR_TIMEOUT;
4020 return I40E_SUCCESS;
4024 i40e_pf_disable_all_queues(struct i40e_hw *hw)
4027 uint16_t firstq, lastq, maxq, i;
4029 reg = I40E_READ_REG(hw, I40E_PFLAN_QALLOC);
4030 if (!(reg & I40E_PFLAN_QALLOC_VALID_MASK)) {
4031 PMD_DRV_LOG(INFO, "PF queue allocation is invalid\n");
4032 return I40E_ERR_PARAM;
4034 firstq = reg & I40E_PFLAN_QALLOC_FIRSTQ_MASK;
4035 lastq = (reg & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4036 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4037 maxq = lastq - firstq;
4038 for (i = 0; i <= maxq; i++) {
4039 ret = i40e_disable_queue(hw, i);
4040 if (ret != I40E_SUCCESS)
4043 return I40E_SUCCESS;
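/*
 * Example: if I40E_PFLAN_QALLOC reports firstq = 0 and lastq = 15,
 * then maxq = 15 and the loop above walks queue indices 0..15,
 * disabling the tx and rx side of each queue pair owned by this PF.
 */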