 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "i40e/i40e_register_x710_int.h"
#include "i40e/i40e_prototype.h"
#include "i40e/i40e_adminq_cmd.h"
#include "i40e/i40e_type.h"

#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000
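/*
 * Illustrative sketch (an assumption, not code from this file): the two
 * constants above bound a polling loop on a queue-enable status bit,
 * giving a worst case wait of 1000 * 1000 us = 1 s, e.g.:
 *
 *	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
 *		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
 *		reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
 *		if (!!(reg & I40E_QTX_ENA_QENA_STAT_MASK) == on)
 *			break;
 *	}
 */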
/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Bit shift and mask */
#define I40E_16_BIT_SHIFT 16
#define I40E_16_BIT_MASK  0xFFFF
#define I40E_32_BIT_SHIFT 32
#define I40E_32_BIT_MASK  0xFFFFFFFF
#define I40E_48_BIT_SHIFT 48
#define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
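/*
 * How the 48-bit constants above are typically used (a sketch of the
 * usual pattern behind the i40e_stat_update_48() calls below, not a
 * verbatim copy of the helper): a 48-bit counter is split across a
 * low/high register pair and reassembled as
 *
 *	new = (uint64_t)I40E_READ_REG(hw, loreg) |
 *	      (((uint64_t)I40E_READ_REG(hw, hireg) & I40E_16_BIT_MASK)
 *	       << I40E_32_BIT_SHIFT);
 *
 * with wrap-around deltas masked by I40E_48_BIT_MASK.
 */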
/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */

#define I40E_RSS_OFFLOAD_ALL ( \
	ETH_RSS_NONF_IPV4_UDP | \
	ETH_RSS_NONF_IPV4_TCP | \
	ETH_RSS_NONF_IPV4_SCTP | \
	ETH_RSS_NONF_IPV4_OTHER | \
	ETH_RSS_NONF_IPV6_UDP | \
	ETH_RSS_NONF_IPV6_TCP | \
	ETH_RSS_NONF_IPV6_SCTP | \
	ETH_RSS_NONF_IPV6_OTHER | \
	ETH_RSS_FRAG_IPV6 | \

/* All bits of RSS hash enable */
#define I40E_RSS_HENA_ALL ( \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
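/*
 * Sketch of how I40E_RSS_HENA_ALL is typically consumed (an assumption;
 * the RSS configuration path is not part of this excerpt): the 64-bit
 * hash-enable mask is split across the PFQF_HENA register pair.
 *
 *	uint64_t hena = I40E_RSS_HENA_ALL;
 *	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
 *	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
 */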
static int eth_i40e_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
			     struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta *reta_conf);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta *reta_conf);
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_vsi_init(struct i40e_vsi *vsi);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
			bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);

/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];

static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure = i40e_dev_configure,
	.dev_start = i40e_dev_start,
	.dev_stop = i40e_dev_stop,
	.dev_close = i40e_dev_close,
	.promiscuous_enable = i40e_dev_promiscuous_enable,
	.promiscuous_disable = i40e_dev_promiscuous_disable,
	.allmulticast_enable = i40e_dev_allmulticast_enable,
	.allmulticast_disable = i40e_dev_allmulticast_disable,
	.link_update = i40e_dev_link_update,
	.stats_get = i40e_dev_stats_get,
	.stats_reset = i40e_dev_stats_reset,
	.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get = i40e_dev_info_get,
	.vlan_filter_set = i40e_vlan_filter_set,
	.vlan_tpid_set = i40e_vlan_tpid_set,
	.vlan_offload_set = i40e_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
	.vlan_pvid_set = i40e_vlan_pvid_set,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.dev_led_on = i40e_dev_led_on,
	.dev_led_off = i40e_dev_led_off,
	.flow_ctrl_set = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
	.mac_addr_add = i40e_macaddr_add,
	.mac_addr_remove = i40e_macaddr_remove,
	.reta_update = i40e_dev_rss_reta_update,
	.reta_query = i40e_dev_rss_reta_query,
	.rss_hash_update = i40e_dev_rss_hash_update,
	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
};

static struct eth_driver rte_i40e_pmd = {
	.name = "rte_i40e_pmd",
	.id_table = pci_id_i40e_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};

i40e_prev_power_of_2(int n)
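/*
 * The body of i40e_prev_power_of_2() is not shown in this excerpt; from
 * its name and its use below (rounding queue counts for RSS and TC
 * mapping), it is expected to round down to the previous power of two,
 * e.g. 6 -> 4, 8 -> 8, 16 -> 16.
 */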
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)

rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
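/*
 * Both helpers above rely on struct rte_eth_link fitting in 64 bits
 * (hence the uint64_t casts): rte_atomic64_cmpset() swaps the whole
 * link struct in one atomic compare-and-set and returns 0 on failure.
 * The elided lines presumably return -1 in that case; a sketch of the
 * full idiom:
 *
 *	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
 *				*(uint64_t *)src) == 0)
 *		return -1;
 *	return 0;
 */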
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

static struct rte_driver rte_i40e_driver = {
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);

eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
		  struct rte_eth_dev *dev)
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = i40e_recv_scattered_pkts;

	pci_dev = dev->pci_dev;
	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL\n");

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	/* Make sure all is clean before doing PF reset */

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
		     "%02d.%02d.%02d eetrack %04x\n",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);
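	/*
	 * Worked example of the NVM version decoding above: for
	 * hw->nvm.version == 0x1234 the log prints "NVM 01.35.04"
	 * (major = bits 15:12 = 1, minor = bits 11:4 = 0x23 = 35,
	 * patch = bits 3:0 = 4).
	 */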
	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp\n");

	i40e_clear_pxe_mode(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
		PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
		goto err_qp_pool_init;
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				 hw->func_caps.num_msix_vectors - 1);
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
		goto err_msix_pool_init;

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		goto err_get_mac_addr;
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
			     "for storing mac address");
		goto err_get_mac_addr;
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
				   i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	rte_free(pf->main_vsi);
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
	i40e_res_pool_destroy(&pf->msix_pool);
	i40e_res_pool_destroy(&pf->qp_pool);
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

i40e_dev_configure(struct rte_eth_dev *dev)
	return i40e_dev_init_vlan(dev);

i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);

	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	I40E_WRITE_FLUSH(hw);

static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */

i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			(interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			((vsi->base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
			(vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
			       msix_vect - 1), interval);

		/* Disable auto-masking when enabling any non-zero interrupt */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);

		/* num_msix_vectors_vf excludes IRQ0 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			(vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

	I40E_WRITE_FLUSH(hw);
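/*
 * Illustrative layout produced by the binding loop above for a VSI with
 * base_queue = 0 and nb_qps = 3 (a worked example, not driver output):
 *
 *	PFINT_LNKLSTN[v-1] -> RXQ0 -> RXQ1 -> RXQ2 -> end-of-list
 *
 * Each QINT_RQCTL entry names its successor via NEXTQ_INDX, and the
 * last queue sets I40E_QINT_RQCTL_NEXTQ_INDX_MASK to terminate the list.
 */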
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval =
		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
		       I40E_PFINT_DYN_CTLN_INTENA_MASK |
		       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
		       (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));

i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);

i40e_dev_start(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	ret = i40e_vsi_init(vsi);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init VSI\n");

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(vsi);
	i40e_vsi_enable_queues_intr(vsi);

	/* Enable all queues which have been configured */
	ret = i40e_vsi_switch_queues(vsi, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI\n");

	/* Enable receiving broadcast packets */
	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to set VSI broadcast\n");

	i40e_vsi_switch_queues(vsi, FALSE);
	i40e_dev_clear_queues(dev);

i40e_dev_stop(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	/* Disable all queues */
	i40e_vsi_switch_queues(vsi, FALSE);

	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(vsi);
	i40e_vsi_queues_unbind_intr(vsi);

i40e_dev_close(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
		       (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");

i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");

i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");

i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
						    vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");

i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info\n");

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
	if (!link.link_status)

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		link.link_speed = ETH_LINK_SPEED_100;

	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
/* Get all the statistics of a VSI */
i40e_update_vsi_stats(struct i40e_vsi *vsi)
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** VSI[%u] stats start *******************\n",
	printf("rx_bytes: %lu\n", nes->rx_bytes);
	printf("rx_unicast: %lu\n", nes->rx_unicast);
	printf("rx_multicast: %lu\n", nes->rx_multicast);
	printf("rx_broadcast: %lu\n", nes->rx_broadcast);
	printf("rx_discards: %lu\n", nes->rx_discards);
	printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
	printf("tx_bytes: %lu\n", nes->tx_bytes);
	printf("tx_unicast: %lu\n", nes->tx_unicast);
	printf("tx_multicast: %lu\n", nes->tx_multicast);
	printf("tx_broadcast: %lu\n", nes->tx_broadcast);
	printf("tx_discards: %lu\n", nes->tx_discards);
	printf("tx_errors: %lu\n", nes->tx_errors);
	printf("***************** VSI[%u] stats end *******************\n",
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
/* Get all statistics of a port */
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
			    pf->offset_loaded, &os->eth.tx_discards,
			    &ns->eth.tx_discards);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
			  ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = ns->eth.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors;
	stats->imcasts = ns->eth.rx_multicast;

	i40e_update_vsi_stats(pf->main_vsi);

#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** PF stats start *******************\n");
	printf("rx_bytes: %lu\n", ns->eth.rx_bytes);
	printf("rx_unicast: %lu\n", ns->eth.rx_unicast);
	printf("rx_multicast: %lu\n", ns->eth.rx_multicast);
	printf("rx_broadcast: %lu\n", ns->eth.rx_broadcast);
	printf("rx_discards: %lu\n", ns->eth.rx_discards);
	printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
	printf("tx_bytes: %lu\n", ns->eth.tx_bytes);
	printf("tx_unicast: %lu\n", ns->eth.tx_unicast);
	printf("tx_multicast: %lu\n", ns->eth.tx_multicast);
	printf("tx_broadcast: %lu\n", ns->eth.tx_broadcast);
	printf("tx_discards: %lu\n", ns->eth.tx_discards);
	printf("tx_errors: %lu\n", ns->eth.tx_errors);

	printf("tx_dropped_link_down: %lu\n", ns->tx_dropped_link_down);
	printf("crc_errors: %lu\n", ns->crc_errors);
	printf("illegal_bytes: %lu\n", ns->illegal_bytes);
	printf("error_bytes: %lu\n", ns->error_bytes);
	printf("mac_local_faults: %lu\n", ns->mac_local_faults);
	printf("mac_remote_faults: %lu\n", ns->mac_remote_faults);
	printf("rx_length_errors: %lu\n", ns->rx_length_errors);
	printf("link_xon_rx: %lu\n", ns->link_xon_rx);
	printf("link_xoff_rx: %lu\n", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_rx[%d]: %lu\n",
		       i, ns->priority_xon_rx[i]);
		printf("priority_xoff_rx[%d]: %lu\n",
		       i, ns->priority_xoff_rx[i]);
	printf("link_xon_tx: %lu\n", ns->link_xon_tx);
	printf("link_xoff_tx: %lu\n", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_tx[%d]: %lu\n",
		       i, ns->priority_xon_tx[i]);
		printf("priority_xoff_tx[%d]: %lu\n",
		       i, ns->priority_xoff_tx[i]);
		printf("priority_xon_2_xoff[%d]: %lu\n",
		       i, ns->priority_xon_2_xoff[i]);
	printf("rx_size_64: %lu\n", ns->rx_size_64);
	printf("rx_size_127: %lu\n", ns->rx_size_127);
	printf("rx_size_255: %lu\n", ns->rx_size_255);
	printf("rx_size_511: %lu\n", ns->rx_size_511);
	printf("rx_size_1023: %lu\n", ns->rx_size_1023);
	printf("rx_size_1522: %lu\n", ns->rx_size_1522);
	printf("rx_size_big: %lu\n", ns->rx_size_big);
	printf("rx_undersize: %lu\n", ns->rx_undersize);
	printf("rx_fragments: %lu\n", ns->rx_fragments);
	printf("rx_oversize: %lu\n", ns->rx_oversize);
	printf("rx_jabber: %lu\n", ns->rx_jabber);
	printf("tx_size_64: %lu\n", ns->tx_size_64);
	printf("tx_size_127: %lu\n", ns->tx_size_127);
	printf("tx_size_255: %lu\n", ns->tx_size_255);
	printf("tx_size_511: %lu\n", ns->tx_size_511);
	printf("tx_size_1023: %lu\n", ns->tx_size_1023);
	printf("tx_size_1522: %lu\n", ns->tx_size_1522);
	printf("tx_size_big: %lu\n", ns->tx_size_big);
	printf("mac_short_packet_dropped: %lu\n",
	       ns->mac_short_packet_dropped);
	printf("checksum_error: %lu\n", ns->checksum_error);
	printf("***************** PF stats end ********************\n");
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
/* Reset the statistics */
i40e_dev_stats_reset(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* This results in reloading the start point of each counter */
	pf->offset_loaded = false;

i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
	PMD_INIT_FUNC_TRACE();

i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;

i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	PMD_INIT_FUNC_TRACE();
		return i40e_vsi_add_vlan(vsi, vlan_id);
		return i40e_vsi_delete_vlan(vsi, vlan_id);

i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
	PMD_INIT_FUNC_TRACE();

i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			i40e_vsi_config_vlan_stripping(vsi, TRUE);
			i40e_vsi_config_vlan_stripping(vsi, FALSE);

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			i40e_vsi_config_double_vlan(vsi, TRUE);
			i40e_vsi_config_double_vlan(vsi, FALSE);

i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
	PMD_INIT_FUNC_TRACE();

i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_vsi_vlan_pvid_info info;

	memset(&info, 0, sizeof(info));
		info.config.pvid = pvid;
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;

	return i40e_vsi_vlan_pvid_set(vsi, &info);

i40e_dev_led_on(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

		i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */

i40e_dev_led_off(struct rte_eth_dev *dev)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

		i40e_led_set(hw, 0, false);

i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused struct rte_eth_fc_conf *fc_conf)
	PMD_INIT_FUNC_TRACE();

i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
	PMD_INIT_FUNC_TRACE();
/* Add a MAC address, and update filters */
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __attribute__((unused)) uint32_t index,
		 __attribute__((unused)) uint32_t pool)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct ether_addr old_mac;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address\n");

	if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");

	/* Write mac address */
	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					mac_addr->addr_bytes, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");

	(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
	(void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,

	ret = i40e_vsi_add_mac(vsi, mac_addr);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");

	ether_addr_copy(mac_addr, &pf->dev_addr);
	i40e_vsi_delete_mac(vsi, &old_mac);

/* Remove a MAC address, and update filters */
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct ether_addr *macaddr;
	struct i40e_hw *hw =
		I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (index >= vsi->max_macaddrs)

	macaddr = &(data->mac_addrs[index]);
	if (!is_valid_assigned_ether_addr(macaddr))

	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					hw->mac.perm_addr, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");

	(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);

	ret = i40e_vsi_delete_mac(vsi, macaddr);
	if (ret != I40E_SUCCESS)

	/* Clear device address as it has been removed */
	if (is_same_ether_addr(&(pf->dev_addr), macaddr))
		memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta *reta_conf)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
			mask = (uint8_t)((reta_conf->mask_hi >>

			l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
		for (j = 0, lut = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				lut |= reta_conf->reta[i + j] << (8 * j);
				lut |= l & (0xFF << (8 * j));
		I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);

i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
			mask = (uint8_t)((reta_conf->mask_hi >>

		lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
		for (j = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				reta_conf->reta[i + j] =
					(uint8_t)((lut >> (8 * j)) & 0xFF);
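/*
 * Worked example for the two RETA helpers above: with
 * reta_conf->mask_lo = 0xF and reta_conf->reta[0..3] = {0, 1, 2, 3},
 * only the first four entries of I40E_PFQF_HLUT(0) are rewritten (one
 * byte per entry, four entries per 32-bit register); all other bytes
 * keep their previous value thanks to the read-modify-write of `l'.
 */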
/**
 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
 * @hw:        pointer to the HW structure
 * @mem:       pointer to mem struct to fill out
 * @size:      size of memory requested
 * @alignment: what to align the allocation to
 **/
enum i40e_status_code
i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			struct i40e_dma_mem *mem,
	static uint64_t id = 0;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

		return I40E_ERR_PARAM;

	snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
	mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
		return I40E_ERR_NO_MEMORY;

	mem->pa = mz->phys_addr;

	return I40E_SUCCESS;

/**
 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum i40e_status_code
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		    struct i40e_dma_mem *mem)
	if (!mem || !mem->va)
		return I40E_ERR_PARAM;

	return I40E_SUCCESS;

/**
 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 **/
enum i40e_status_code
i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			 struct i40e_virt_mem *mem,
		return I40E_ERR_PARAM;

	mem->va = rte_zmalloc("i40e", size, 0);
		return I40E_SUCCESS;
	return I40E_ERR_NO_MEMORY;

/**
 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
 * @hw:  pointer to the HW structure
 * @mem: pointer to mem struct to free
 **/
enum i40e_status_code
i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		     struct i40e_virt_mem *mem)
		return I40E_ERR_PARAM;

	return I40E_SUCCESS;

i40e_init_spinlock_d(struct i40e_spinlock *sp)
	rte_spinlock_init(&sp->spinlock);

i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
	rte_spinlock_lock(&sp->spinlock);

i40e_release_spinlock_d(struct i40e_spinlock *sp)
	rte_spinlock_unlock(&sp->spinlock);

i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
/*
 * Get the hardware capabilities, which will be parsed
 * and saved into struct i40e_hw.
 */
i40e_get_cap(struct i40e_hw *hw)
	struct i40e_aqc_list_capabilities_element_resp *buf;
	uint16_t len, size = 0;

	/* Calculate a buffer large enough to hold the response data temporarily */
	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
		I40E_MAX_CAP_ELE_NUM;
	buf = rte_zmalloc("i40e", len, 0);
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
		return I40E_ERR_NO_MEMORY;

	/* Get, parse the capabilities and save it to hw */
	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
				i40e_aqc_opc_list_func_capabilities, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");

	/* Free the temporary buffer after being used */

i40e_pf_parameter_init(struct rte_eth_dev *dev)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t sum_queues = 0, sum_vsis;

	/* First check if FW supports SR-IOV */
	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
	PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
	/* Allocate queues for pf */
	if (hw->func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS;
		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
		pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);

	sum_queues = pf->lan_nb_qps;
	/* The default VSI is not counted in */
	PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);

	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
			PMD_INIT_LOG(ERR, "Config VF number %u, "
				     "max supported %u.\n",
				     dev->pci_dev->max_vfs,
				     hw->func_caps.num_vfs);
		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
				     "max support %u queues.\n", pf->vf_nb_qps,
				     I40E_MAX_QP_NUM_PER_VF);
		pf->vf_num = dev->pci_dev->max_vfs;
		sum_queues += pf->vf_nb_qps * pf->vf_num;
		sum_vsis += pf->vf_num;
		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
			     pf->vf_num, pf->vf_nb_qps);

	if (hw->func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ;
		pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
		sum_queues += pf->vmdq_nb_qps;
		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);

	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
		/*
		 * Each flow director consumes one VSI and one queue,
		 * but the exact usage cannot be predicted here.
		 */

	if (sum_vsis > pf->max_num_vsi ||
	    sum_queues > hw->func_caps.num_rx_qp) {
		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
			     pf->max_num_vsi, sum_vsis);
		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
			     hw->func_caps.num_rx_qp, sum_queues);

	/*
	 * Each VSI occupies at least one MSI-X interrupt,
	 * plus IRQ0 for the misc interrupt cause.
	 */
	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
			     sum_vsis, hw->func_caps.num_msix_vectors);

	return I40E_SUCCESS;
i40e_pf_get_switch_config(struct i40e_pf *pf)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_aqc_get_switch_config_resp *switch_config;
	struct i40e_aqc_switch_config_element_resp *element;
	uint16_t start_seid = 0, num_reported;

	switch_config = (struct i40e_aqc_get_switch_config_resp *)
			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
	if (!switch_config) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");

	/* Get the switch configurations */
	ret = i40e_aq_get_switch_config(hw, switch_config,
					I40E_AQ_LARGE_BUF, &start_seid, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
	if (num_reported != 1) { /* The number should be 1 */
		PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");

	/* Parse the switch configuration elements */
	element = &(switch_config->element[0]);
	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
		PMD_DRV_LOG(INFO, "Unknown element type\n");

	rte_free(switch_config);
i40e_res_pool_init(struct i40e_res_pool_info *pool, uint32_t base,
	struct pool_entry *entry;

	if (pool == NULL || num == 0)

	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
	if (entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for "

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	LIST_INSERT_HEAD(&pool->free_list, entry, next);

i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
	struct pool_entry *entry;

	LIST_FOREACH(entry, &pool->alloc_list, next) {
		LIST_REMOVE(entry, next);
	LIST_FOREACH(entry, &pool->free_list, next) {
		LIST_REMOVE(entry, next);

	pool->num_alloc = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

i40e_res_pool_free(struct i40e_res_pool_info *pool,
	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
	uint32_t pool_offset;

		PMD_DRV_LOG(ERR, "Invalid parameter\n");

	pool_offset = base - pool->base;
	/* Lookup in alloc list */
	LIST_FOREACH(entry, &pool->alloc_list, next) {
		if (entry->base == pool_offset) {
			valid_entry = entry;
			LIST_REMOVE(entry, next);

	/* Not found, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to find entry\n");

	/*
	 * Found it; move it to the free list and try to merge.
	 * To make merging easier, the free list is always sorted by
	 * queue base. Find the adjacent previous and next entries.
	 */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->base > valid_entry->base) {

	/* Try to merge with the next one */
		/* Merge with next one */
		if (valid_entry->base + valid_entry->len == next->base) {
			next->base = valid_entry->base;
			next->len += valid_entry->len;
			rte_free(valid_entry);

		/* Merge with previous one */
		if (prev->base + prev->len == valid_entry->base) {
			prev->len += valid_entry->len;
			/* If it merged with the next one, remove that node */
				LIST_REMOVE(valid_entry, next);
				rte_free(valid_entry);
				rte_free(valid_entry);

	/* No entry to merge with; insert it */
		LIST_INSERT_AFTER(prev, valid_entry, next);
	else if (next != NULL)
		LIST_INSERT_BEFORE(next, valid_entry, next);
	else /* It's an empty list, insert at the head */
		LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);

	pool->num_free += valid_entry->len;
	pool->num_alloc -= valid_entry->len;

i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
	struct pool_entry *entry, *valid_entry;

	if (pool == NULL || num == 0) {
		PMD_DRV_LOG(ERR, "Invalid parameter\n");

	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
			    num, pool->num_free);

	/* Lookup in the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			if (entry->len == num) {
				valid_entry = entry;
			if (valid_entry == NULL || valid_entry->len > entry->len)
				valid_entry = entry;

	/* No entry found to satisfy the request, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "No valid entry found\n");

	/*
	 * The entry has exactly the number of queues requested;
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	/*
	 * The entry has more queues than requested; create a new entry
	 * for alloc_list and subtract the allocated base and number
	 * from the free_list entry.
	 */
		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for "

		entry->base = valid_entry->base;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return (valid_entry->base + pool->base);
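/*
 * Usage sketch for the resource pool API above (mirroring the calls
 * made in eth_i40e_dev_init(); the queue count is illustrative, and a
 * negative return from alloc is assumed to signal failure):
 *
 *	struct i40e_res_pool_info qp_pool;
 *	int base;
 *
 *	i40e_res_pool_init(&qp_pool, 0, 64);
 *	base = i40e_res_pool_alloc(&qp_pool, 16);
 *	if (base >= 0)
 *		i40e_res_pool_free(&qp_pool, base);
 *	i40e_res_pool_destroy(&qp_pool);
 */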
/*
 * bitmap_is_subset - Check whether src2 is a subset of src1
 */
bitmap_is_subset(uint8_t src1, uint8_t src2)
	return !((src1 ^ src2) & src2);
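/*
 * Worked example: src1 = 0x07, src2 = 0x05 gives (0x07 ^ 0x05) & 0x05
 * = 0x02 & 0x05 = 0, so 0x05 is a subset of 0x07 (returns 1); with
 * src2 = 0x09, (0x07 ^ 0x09) & 0x09 = 0x0e & 0x09 = 0x08, which is
 * non-zero, so 0x09 is not a subset (returns 0).
 */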
1999 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2001 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2003 /* If DCB is not supported, only default TC is supported */
2004 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2005 PMD_DRV_LOG(ERR, "DCB is not enabled, "
2006 "only TC0 is supported\n");
2010 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2011 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2012 "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
2016 return I40E_SUCCESS;
2020 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2021 struct i40e_vsi_vlan_pvid_info *info)
2024 struct i40e_vsi_context ctxt;
2025 uint8_t vlan_flags = 0;
2028 if (vsi == NULL || info == NULL) {
2029 PMD_DRV_LOG(ERR, "invalid parameters\n");
2030 return I40E_ERR_PARAM;
2034 vsi->info.pvid = info->config.pvid;
2036 * If PVID insertion is enabled, only tagged packets are
2037 * allowed to be sent out.
2039 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2040 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2043 if (info->config.reject.tagged == 0)
2044 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2046 if (info->config.reject.untagged == 0)
2047 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2049 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2050 I40E_AQ_VSI_PVLAN_MODE_MASK);
2051 vsi->info.port_vlan_flags |= vlan_flags;
2052 vsi->info.valid_sections =
2053 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2054 memset(&ctxt, 0, sizeof(ctxt));
2055 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2056 ctxt.seid = vsi->seid;
2058 hw = I40E_VSI_TO_HW(vsi);
2059 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2060 if (ret != I40E_SUCCESS)
2061 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2067 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2069 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2071 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2073 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2074 if (ret != I40E_SUCCESS)
2078 PMD_DRV_LOG(ERR, "seid not valid\n");
2082 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2083 tc_bw_data.tc_valid_bits = enabled_tcmap;
2084 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2085 tc_bw_data.tc_bw_credits[i] =
2086 (enabled_tcmap & (1 << i)) ? 1 : 0;
2088 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2089 if (ret != I40E_SUCCESS) {
2090 PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
2094 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2095 sizeof(vsi->info.qs_handle));
2096 return I40E_SUCCESS;
2100 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2101 struct i40e_aqc_vsi_properties_data *info,
2102 uint8_t enabled_tcmap)
2104 int ret, total_tc = 0, i;
2105 uint16_t qpnum_per_tc, bsf, qp_idx;
2107 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2108 if (ret != I40E_SUCCESS)
2111 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2112 if (enabled_tcmap & (1 << i))
2114 vsi->enabled_tc = enabled_tcmap;
2116 /* Number of queues per enabled TC */
2117 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2118 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
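/*
 * qpnum_per_tc is a power of 2 here, so rte_bsf32() yields
 * log2(qpnum_per_tc), e.g. 8 queues per TC gives bsf = 3; the TC
 * mapping below encodes the queue count as this power-of-2 exponent.
 */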
2119 bsf = rte_bsf32(qpnum_per_tc);
2121 /* Adjust the queue number to actual queues that can be applied */
2122 vsi->nb_qps = qpnum_per_tc * total_tc;
2125 * Configure TC and queue mapping parameters. For each enabled TC,
2126 * allocate qpnum_per_tc queues to its traffic; for a disabled TC,
2127 * the default queue will serve it.
2130 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2131 if (vsi->enabled_tc & (1 << i)) {
2132 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2133 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2134 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2135 qp_idx += qpnum_per_tc;
2137 info->tc_mapping[i] = 0;
2140 /* Associate queue number with VSI */
2141 if (vsi->type == I40E_VSI_SRIOV) {
2142 info->mapping_flags |=
2143 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2144 for (i = 0; i < vsi->nb_qps; i++)
2145 info->queue_mapping[i] =
2146 rte_cpu_to_le_16(vsi->base_queue + i);
2148 info->mapping_flags |=
2149 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2150 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2152 info->valid_sections =
2153 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2155 return I40E_SUCCESS;
2159 i40e_veb_release(struct i40e_veb *veb)
2161 struct i40e_vsi *vsi;
2164 if (veb == NULL || veb->associate_vsi == NULL)
2167 if (!TAILQ_EMPTY(&veb->head)) {
2168 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
2172 vsi = veb->associate_vsi;
2173 hw = I40E_VSI_TO_HW(vsi);
2175 vsi->uplink_seid = veb->uplink_seid;
2176 i40e_aq_delete_element(hw, veb->seid, NULL);
2179 return I40E_SUCCESS;
2183 static struct i40e_veb *
2184 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2186 struct i40e_veb *veb;
2190 if (NULL == pf || vsi == NULL) {
2191 PMD_DRV_LOG(ERR, "veb setup failed, "
2192 "associated VSI shouldn't be NULL\n");
2195 hw = I40E_PF_TO_HW(pf);
2197 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2199 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
2203 veb->associate_vsi = vsi;
2204 TAILQ_INIT(&veb->head);
2205 veb->uplink_seid = vsi->uplink_seid;
2207 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2208 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2210 if (ret != I40E_SUCCESS) {
2211 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
2212 hw->aq.asq_last_status);
2216 /* get statistics index */
2217 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2218 &veb->stats_idx, NULL, NULL, NULL);
2219 if (ret != I40E_SUCCESS) {
2220 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d\n",
2221 hw->aq.asq_last_status);
2225 /* Get VEB bandwidth, to be implemented */
2226 /* The associated vsi now binds to the VEB, set its uplink to this VEB */
2227 vsi->uplink_seid = veb->seid;
2236 i40e_vsi_release(struct i40e_vsi *vsi)
2240 struct i40e_vsi_list *vsi_list;
2242 struct i40e_mac_filter *f;
2245 return I40E_SUCCESS;
2247 pf = I40E_VSI_TO_PF(vsi);
2248 hw = I40E_VSI_TO_HW(vsi);
2250 /* VSI has children attached, release the children first */
2252 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2253 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2255 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2257 i40e_veb_release(vsi->veb);
2260 /* Remove all macvlan filters of the VSI */
2261 i40e_vsi_remove_all_macvlan_filter(vsi);
2262 TAILQ_FOREACH(f, &vsi->mac_list, next)
2265 if (vsi->type != I40E_VSI_MAIN) {
2266 /* Remove vsi from parent's sibling list */
2267 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2268 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
2269 return I40E_ERR_PARAM;
2271 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2272 &vsi->sib_vsi_list, list);
2274 /* Remove all switch elements of the VSI */
2275 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2276 if (ret != I40E_SUCCESS)
2277 PMD_DRV_LOG(ERR, "Failed to delete element\n");
2279 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2281 if (vsi->type != I40E_VSI_SRIOV)
2282 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2285 return I40E_SUCCESS;
2289 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2291 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2292 struct i40e_aqc_remove_macvlan_element_data def_filter;
2295 if (vsi->type != I40E_VSI_MAIN)
2296 return I40E_ERR_CONFIG;
2297 memset(&def_filter, 0, sizeof(def_filter));
2298 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2300 def_filter.vlan_tag = 0;
2301 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2302 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2303 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2304 if (ret != I40E_SUCCESS) {
2305 struct i40e_mac_filter *f;
2307 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2308 "macvlan filter\n");
2309 /* Need to add the permanent mac into the mac list */
2310 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2312 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
2313 return I40E_ERR_NO_MEMORY;
2315 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2317 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2323 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2327 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2329 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2330 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2331 struct i40e_hw *hw = &vsi->adapter->hw;
2335 memset(&bw_config, 0, sizeof(bw_config));
2336 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2337 if (ret != I40E_SUCCESS) {
2338 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
2339 "configuration %u\n", hw->aq.asq_last_status);
2343 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2344 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2345 &ets_sla_config, NULL);
2346 if (ret != I40E_SUCCESS) {
2347 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2348 "configuration %u\n", hw->aq.asq_last_status);
2352 /* Don't store the info yet, just print it out */
2353 PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
2354 PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
2355 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2356 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
2357 ets_sla_config.share_credits[i]);
2358 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
2359 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2360 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u\n", i,
2361 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2370 i40e_vsi_setup(struct i40e_pf *pf,
2371 enum i40e_vsi_type type,
2372 struct i40e_vsi *uplink_vsi,
2373 uint16_t user_param)
2375 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2376 struct i40e_vsi *vsi;
2378 struct i40e_vsi_context ctxt;
2379 struct ether_addr broadcast =
2380 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2382 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2383 PMD_DRV_LOG(ERR, "VSI setup failed, "
2384 "uplink VSI shouldn't be NULL\n");
2388 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2389 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2390 "uplink VSI should be NULL\n");
2394 /* If the uplink vsi hasn't set up a VEB, create one first */
2395 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2396 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2398 if (NULL == uplink_vsi->veb) {
2399 PMD_DRV_LOG(ERR, "VEB setup failed\n");
2404 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2406 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
2409 TAILQ_INIT(&vsi->mac_list);
2411 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2412 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2413 vsi->parent_vsi = uplink_vsi;
2414 vsi->user_param = user_param;
2415 /* Allocate queues */
2416 switch (vsi->type) {
2417 case I40E_VSI_MAIN:
2418 vsi->nb_qps = pf->lan_nb_qps;
2420 case I40E_VSI_SRIOV:
2421 vsi->nb_qps = pf->vf_nb_qps;
2426 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2428 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2432 vsi->base_queue = ret;
2434 /* VF has MSIX interrupt in VF range, don't allocate here */
2435 if (type != I40E_VSI_SRIOV) {
2436 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2438 PMD_DRV_LOG(ERR, "VSI %d allocate MSIX intr failed %d", vsi->seid, ret);
2439 goto fail_queue_alloc;
2441 vsi->msix_intr = ret;
2445 if (type == I40E_VSI_MAIN) {
2446 /* For the main VSI, no need to add it since it's the default one */
2447 vsi->uplink_seid = pf->mac_seid;
2448 vsi->seid = pf->main_vsi_seid;
2449 /* Bind queues with specific MSIX interrupt */
2451 * At least 2 interrupts are needed: one for the misc cause, which
2452 * is enabled from the OS side, and another for the queues, whose
2453 * interrupt is enabled from the device side only.
2456 /* Get default VSI parameters from hardware */
2457 memset(&ctxt, 0, sizeof(ctxt));
2458 ctxt.seid = vsi->seid;
2459 ctxt.pf_num = hw->pf_id;
2460 ctxt.uplink_seid = vsi->uplink_seid;
2462 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2463 if (ret != I40E_SUCCESS) {
2464 PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
2465 goto fail_msix_alloc;
2467 (void)rte_memcpy(&vsi->info, &ctxt.info,
2468 sizeof(struct i40e_aqc_vsi_properties_data));
2469 vsi->vsi_id = ctxt.vsi_number;
2470 vsi->info.valid_sections = 0;
2472 /* Configure TC, with only TC0 enabled */
2473 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2475 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
2476 goto fail_msix_alloc;
2479 /* TC, queue mapping */
2480 memset(&ctxt, 0, sizeof(ctxt));
2481 vsi->info.valid_sections |=
2482 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2483 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2484 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2485 (void)rte_memcpy(&ctxt.info, &vsi->info,
2486 sizeof(struct i40e_aqc_vsi_properties_data));
2487 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2488 I40E_DEFAULT_TCMAP);
2489 if (ret != I40E_SUCCESS) {
2490 PMD_DRV_LOG(ERR, "Failed to configure "
2491 "TC queue mapping\n");
2492 goto fail_msix_alloc;
2494 ctxt.seid = vsi->seid;
2495 ctxt.pf_num = hw->pf_id;
2496 ctxt.uplink_seid = vsi->uplink_seid;
2499 /* Update VSI parameters */
2500 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2501 if (ret != I40E_SUCCESS) {
2502 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2503 goto fail_msix_alloc;
2506 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2507 sizeof(vsi->info.tc_mapping));
2508 (void)rte_memcpy(&vsi->info.queue_mapping,
2509 &ctxt.info.queue_mapping,
2510 sizeof(vsi->info.queue_mapping));
2511 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2512 vsi->info.valid_sections = 0;
2514 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2518 * Updating the default filter settings is necessary to prevent
2519 * reception of tagged packets.
2520 * Some old firmware configurations load a default macvlan
2521 * filter which accepts both tagged and untagged packets.
2522 * The update replaces it with a normal filter if needed.
2523 * For NVM 4.2.2 or later, the update is not needed anymore.
2524 * Firmware with correct configurations loads the default
2525 * macvlan filter which is expected and cannot be removed.
2527 i40e_update_default_filter_setting(vsi);
2528 } else if (type == I40E_VSI_SRIOV) {
2529 memset(&ctxt, 0, sizeof(ctxt));
2531 * For other VSIs, the uplink_seid equals the uplink VSI's
2532 * uplink_seid since they share the same VEB
2534 vsi->uplink_seid = uplink_vsi->uplink_seid;
2535 ctxt.pf_num = hw->pf_id;
2536 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2537 ctxt.uplink_seid = vsi->uplink_seid;
2538 ctxt.connection_type = 0x1;
2539 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2541 /* Configure switch ID */
2542 ctxt.info.valid_sections |=
2543 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2544 ctxt.info.switch_id =
2545 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2546 /* Configure port/vlan */
2547 ctxt.info.valid_sections |=
2548 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2549 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2550 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2551 I40E_DEFAULT_TCMAP);
2552 if (ret != I40E_SUCCESS) {
2553 PMD_DRV_LOG(ERR, "Failed to configure "
2554 "TC queue mapping\n");
2555 goto fail_msix_alloc;
2557 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2558 ctxt.info.valid_sections |=
2559 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2561 * Since the VSI is not created yet, only configure the parameters;
2562 * the vsi will be added below.
2566 PMD_DRV_LOG(ERR, "VSI: Other VSI types are not supported yet\n");
2567 goto fail_msix_alloc;
2570 if (vsi->type != I40E_VSI_MAIN) {
2571 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2573 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
2574 hw->aq.asq_last_status);
2575 goto fail_msix_alloc;
2577 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2578 vsi->info.valid_sections = 0;
2579 vsi->seid = ctxt.seid;
2580 vsi->vsi_id = ctxt.vsi_number;
2581 vsi->sib_vsi_list.vsi = vsi;
2582 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2583 &vsi->sib_vsi_list, list);
2586 /* MAC/VLAN configuration */
2587 ret = i40e_vsi_add_mac(vsi, &broadcast);
2588 if (ret != I40E_SUCCESS) {
2589 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
2590 goto fail_msix_alloc;
2593 /* Get VSI BW information */
2594 i40e_vsi_dump_bw_config(vsi);
2597 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2599 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2605 /* Configure vlan stripping on or off */
2607 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2609 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2610 struct i40e_vsi_context ctxt;
2612 int ret = I40E_SUCCESS;
2614 /* Check if it is already on or off */
2615 if (vsi->info.valid_sections &
2616 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2618 if ((vsi->info.port_vlan_flags &
2619 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2620 return 0; /* already on */
2622 if ((vsi->info.port_vlan_flags &
2623 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2624 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2625 return 0; /* already off */
2630 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2632 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2633 vsi->info.valid_sections =
2634 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2635 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2636 vsi->info.port_vlan_flags |= vlan_flags;
2637 ctxt.seid = vsi->seid;
2638 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2639 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2641 PMD_DRV_LOG(INFO, "Failed to update VSI to %s vlan stripping\n",
2642 on ? "enable" : "disable");
2648 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2650 struct rte_eth_dev_data *data = dev->data;
2653 /* Apply vlan offload setting */
2654 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2656 /* Apply double-vlan setting, not implemented yet */
2658 /* Apply pvid setting */
2659 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2660 data->dev_conf.txmode.hw_vlan_insert_pvid);
2662 PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
2668 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2670 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2672 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2676 i40e_update_flow_control(struct i40e_hw *hw)
2678 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2679 struct i40e_link_status link_status;
2680 uint32_t rxfc = 0, txfc = 0, reg;
2684 memset(&link_status, 0, sizeof(link_status));
2685 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2686 if (ret != I40E_SUCCESS) {
2687 PMD_DRV_LOG(ERR, "Failed to get link status information\n");
2688 goto write_reg; /* Disable flow control */
2691 an_info = hw->phy.link_info.an_info;
2692 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2693 PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
2694 ret = I40E_ERR_NOT_READY;
2695 goto write_reg; /* Disable flow control */
2698 * If link auto negotiation is enabled, flow control needs to
2699 * be configured according to it
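* (both RX and TX pause negotiated -> I40E_FC_FULL, RX only ->
* I40E_FC_RX_PAUSE, TX only -> I40E_FC_TX_PAUSE, neither -> I40E_FC_NONE)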
2701 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2702 case I40E_LINK_PAUSE_RXTX:
2705 hw->fc.current_mode = I40E_FC_FULL;
2707 case I40E_AQ_LINK_PAUSE_RX:
2709 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2711 case I40E_AQ_LINK_PAUSE_TX:
2713 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2716 hw->fc.current_mode = I40E_FC_NONE;
2721 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2722 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2723 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2724 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2725 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2726 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2733 i40e_pf_setup(struct i40e_pf *pf)
2735 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2736 struct i40e_filter_control_settings settings;
2737 struct rte_eth_dev_data *dev_data = pf->dev_data;
2738 struct i40e_vsi *vsi;
2741 /* Clear all stats counters */
2742 pf->offset_loaded = FALSE;
2743 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2744 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2746 ret = i40e_pf_get_switch_config(pf);
2747 if (ret != I40E_SUCCESS) {
2748 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2753 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2755 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2756 return I40E_ERR_NOT_READY;
2759 dev_data->nb_rx_queues = vsi->nb_qps;
2760 dev_data->nb_tx_queues = vsi->nb_qps;
2762 /* Configure filter control */
2763 memset(&settings, 0, sizeof(settings));
2764 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2765 /* Enable ethtype and macvlan filters */
2766 settings.enable_ethtype = TRUE;
2767 settings.enable_macvlan = TRUE;
2768 ret = i40e_set_filter_control(hw, &settings);
2770 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2773 /* Update flow control according to the auto negotiation */
2774 i40e_update_flow_control(hw);
2776 return I40E_SUCCESS;
2780 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2786 * Set or clear TX Queue Disable flags,
2787 * as required by hardware.
2789 i40e_pre_tx_queue_cfg(hw, q_idx, on);
2790 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
2792 /* Wait until the request is finished */
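/*
 * QENA_REQ reflects the requested queue state and QENA_STAT the
 * current one; a previous request has completed once the two bits
 * are equal, i.e. their XOR is 0.
 */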
2793 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2794 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2795 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2796 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2797 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2803 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2804 return I40E_SUCCESS; /* already on, skip next steps */
2806 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
2807 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2809 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2810 return I40E_SUCCESS; /* already off, skip next steps */
2811 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2813 /* Write the register */
2814 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2815 /* Check the result */
2816 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2817 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2818 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2820 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2821 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2824 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2825 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2829 /* Check if it timed out */
2830 if (j >= I40E_CHK_Q_ENA_COUNT) {
2831 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
2832 (on ? "enable" : "disable"), q_idx);
2833 return I40E_ERR_TIMEOUT;
2836 return I40E_SUCCESS;
2839 /* Switch on or off the tx queues */
2841 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
2843 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2844 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2845 struct i40e_tx_queue *txq;
2849 pf_q = vsi->base_queue;
2850 for (i = 0; i < dev_data->nb_tx_queues; i++, pf_q++) {
2851 txq = dev_data->tx_queues[i];
2853 continue; /* Queue not configured */
2854 ret = i40e_switch_tx_queue(hw, pf_q, on);
2855 if (ret != I40E_SUCCESS)
2859 return I40E_SUCCESS;
2863 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2868 /* Wait until the request is finished */
2869 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2870 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2871 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2872 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2873 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
2878 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2879 return I40E_SUCCESS; /* Already on, skip next steps */
2880 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2882 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2883 return I40E_SUCCESS; /* Already off, skip next steps */
2884 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2887 /* Write the register */
2888 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
2889 /* Check the result */
2890 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2891 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2892 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2894 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2895 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
2898 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2899 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2904 /* Check if it timed out */
2905 if (j >= I40E_CHK_Q_ENA_COUNT) {
2906 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
2907 (on ? "enable" : "disable"), q_idx);
2908 return I40E_ERR_TIMEOUT;
2911 return I40E_SUCCESS;
2913 /* Switch on or off the rx queues */
2915 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
2917 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2918 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2919 struct i40e_rx_queue *rxq;
2923 pf_q = vsi->base_queue;
2924 for (i = 0; i < dev_data->nb_rx_queues; i++, pf_q++) {
2925 rxq = dev_data->rx_queues[i];
2927 continue; /* Queue not configured */
2928 ret = i40e_switch_rx_queue(hw, pf_q, on);
2929 if (ret != I40E_SUCCESS)
2933 return I40E_SUCCESS;
2936 /* Switch on or off all the rx/tx queues */
2938 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
2943 /* enable rx queues before enabling tx queues */
2944 ret = i40e_vsi_switch_rx_queues(vsi, on);
2946 PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
2949 ret = i40e_vsi_switch_tx_queues(vsi, on);
2951 /* Stop tx queues before stopping rx queues */
2952 ret = i40e_vsi_switch_tx_queues(vsi, on);
2954 PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
2957 ret = i40e_vsi_switch_rx_queues(vsi, on);
2963 /* Initialize VSI for TX */
2965 i40e_vsi_tx_init(struct i40e_vsi *vsi)
2967 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2968 struct rte_eth_dev_data *data = pf->dev_data;
2970 uint32_t ret = I40E_SUCCESS;
2972 for (i = 0; i < data->nb_tx_queues; i++) {
2973 ret = i40e_tx_queue_init(data->tx_queues[i]);
2974 if (ret != I40E_SUCCESS)
2981 /* Initialize VSI for RX */
2983 i40e_vsi_rx_init(struct i40e_vsi *vsi)
2985 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2986 struct rte_eth_dev_data *data = pf->dev_data;
2987 int ret = I40E_SUCCESS;
2990 i40e_pf_config_mq_rx(pf);
2991 for (i = 0; i < data->nb_rx_queues; i++) {
2992 ret = i40e_rx_queue_init(data->rx_queues[i]);
2993 if (ret != I40E_SUCCESS) {
2994 PMD_DRV_LOG(ERR, "Failed to do RX queue "
2995 "initialization\n");
3003 /* Initialize VSI */
3005 i40e_vsi_init(struct i40e_vsi *vsi)
3009 err = i40e_vsi_tx_init(vsi);
3011 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
3014 err = i40e_vsi_rx_init(vsi);
3016 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
3024 i40e_stat_update_32(struct i40e_hw *hw,
3032 new_data = (uint64_t)I40E_READ_REG(hw, reg);
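/*
 * The counters are free-running and roll over; if the new reading is
 * below the saved offset, the 32-bit counter wrapped, so add 2^32
 * before subtracting, e.g. offset 0xFFFFFFF0, new_data 0x10 -> 0x20.
 */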
3036 if (new_data >= *offset)
3037 *stat = (uint64_t)(new_data - *offset);
3039 *stat = (uint64_t)((new_data +
3040 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3044 i40e_stat_update_48(struct i40e_hw *hw,
3053 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3054 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3055 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
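/*
 * Same rollover handling as the 32-bit case, except the counter is
 * 48 bits wide: the low 32 bits come from loreg and the high 16 bits
 * from hireg.
 */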
3060 if (new_data >= *offset)
3061 *stat = new_data - *offset;
3063 *stat = (uint64_t)((new_data +
3064 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3066 *stat &= I40E_48_BIT_MASK;
3071 i40e_pf_disable_irq0(struct i40e_hw *hw)
3073 /* Disable all interrupt types */
3074 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3075 I40E_WRITE_FLUSH(hw);
3080 i40e_pf_enable_irq0(struct i40e_hw *hw)
3082 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3083 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3084 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3085 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3086 I40E_WRITE_FLUSH(hw);
3090 i40e_pf_config_irq0(struct i40e_hw *hw)
3094 /* read pending request and disable first */
3095 i40e_pf_disable_irq0(hw);
3097 * Enable all interrupt error options to detect possible errors,
3098 * other informative interrupts are ignored
3100 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3101 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3102 I40E_PFINT_ICR0_ENA_GRST_MASK |
3103 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3104 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3105 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3106 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3107 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3109 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3110 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3111 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3113 /* Don't link any queues with irq0 */
3114 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3115 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3119 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3121 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3122 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3125 uint32_t index, offset, val;
3130 * Try to find which VF triggered a reset. Use the absolute VF id to
3131 * access it since the register is a global register.
3133 for (i = 0; i < pf->vf_num; i++) {
3134 abs_vf_id = hw->func_caps.vf_base_id + i;
3135 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3136 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3137 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3138 /* VFR event occurred */
3139 if (val & (0x1 << offset)) {
3142 /* Clear the event first */
3143 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3145 PMD_DRV_LOG(INFO, "VF %u reset occurred\n", abs_vf_id);
3147 * Only notify that a VF reset event occurred,
3148 * don't trigger another SW reset
3150 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3151 if (ret != I40E_SUCCESS)
3152 PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
3158 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3160 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3161 struct i40e_arq_event_info info;
3162 uint16_t pending, opcode;
3165 info.msg_size = I40E_AQ_BUF_SZ;
3166 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3167 if (!info.msg_buf) {
3168 PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
3174 ret = i40e_clean_arq_element(hw, &info, &pending);
3176 if (ret != I40E_SUCCESS) {
3177 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3178 "aq_err: %u\n", hw->aq.asq_last_status);
3181 opcode = rte_le_to_cpu_16(info.desc.opcode);
3184 case i40e_aqc_opc_send_msg_to_pf:
3185 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3186 i40e_pf_host_handle_vf_msg(dev,
3187 rte_le_to_cpu_16(info.desc.retval),
3188 rte_le_to_cpu_32(info.desc.cookie_high),
3189 rte_le_to_cpu_32(info.desc.cookie_low),
3194 PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
3198 /* Reset the buffer after processing one */
3199 info.msg_size = I40E_AQ_BUF_SZ;
3201 rte_free(info.msg_buf);
3205 * Interrupt handler triggered by NIC for handling
3206 * specific interrupt.
3209 * Pointer to interrupt handle.
3211 * The address of parameter (struct rte_eth_dev *) registered before.
3217 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3220 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3221 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3222 uint32_t cause, enable;
3224 i40e_pf_disable_irq0(hw);
3226 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3227 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3229 /* Shared IRQ case, return */
3230 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3231 PMD_DRV_LOG(INFO, "Port%d INT0: shared IRQ case, "
3232 "no INT event to process\n", hw->pf_id);
3236 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3237 PMD_DRV_LOG(INFO, "INT:Link status changed\n");
3238 i40e_dev_link_update(dev, 0);
3241 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3242 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
3244 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3245 PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
3247 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3248 PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
3250 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3251 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occurred\n");
3253 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3254 PMD_DRV_LOG(INFO, "INT:HMC error occurred\n");
3256 /* Add processing func to deal with VF reset event */
3257 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3258 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3259 i40e_dev_handle_vfr_event(dev);
3261 /* Find admin queue event */
3262 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3263 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3264 i40e_dev_handle_aq_msg(dev);
3268 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3269 /* Re-enable interrupt from device side */
3270 i40e_pf_enable_irq0(hw);
3271 /* Re-enable interrupt from host side */
3272 rte_intr_enable(&(dev->pci_dev->intr_handle));
3276 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3277 struct i40e_macvlan_filter *filter,
3280 int ele_num, ele_buff_size;
3281 int num, actual_num, i;
3282 int ret = I40E_SUCCESS;
3283 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3284 struct i40e_aqc_add_macvlan_element_data *req_list;
3286 if (filter == NULL || total == 0)
3287 return I40E_ERR_PARAM;
3288 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3289 ele_buff_size = hw->aq.asq_buf_size;
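/*
 * The admin queue buffer holds at most ele_num elements, so larger
 * requests are programmed in batches of at most ele_num filters per
 * AQ command.
 */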
3291 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3292 if (req_list == NULL) {
3293 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
3294 return I40E_ERR_NO_MEMORY;
3299 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3300 memset(req_list, 0, ele_buff_size);
3302 for (i = 0; i < actual_num; i++) {
3303 (void)rte_memcpy(req_list[i].mac_addr,
3304 &filter[num + i].macaddr, ETH_ADDR_LEN);
3305 req_list[i].vlan_tag =
3306 rte_cpu_to_le_16(filter[num + i].vlan_id);
3307 req_list[i].flags = rte_cpu_to_le_16(
3308 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3309 req_list[i].queue_number = 0;
3312 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3314 if (ret != I40E_SUCCESS) {
3315 PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
3319 } while (num < total);
3327 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3328 struct i40e_macvlan_filter *filter,
3331 int ele_num, ele_buff_size;
3332 int num, actual_num, i;
3333 int ret = I40E_SUCCESS;
3334 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3335 struct i40e_aqc_remove_macvlan_element_data *req_list;
3337 if (filter == NULL || total == 0)
3338 return I40E_ERR_PARAM;
3340 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3341 ele_buff_size = hw->aq.asq_buf_size;
3343 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3344 if (req_list == NULL) {
3345 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
3346 return I40E_ERR_NO_MEMORY;
3351 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3352 memset(req_list, 0, ele_buff_size);
3354 for (i = 0; i < actual_num; i++) {
3355 (void)rte_memcpy(req_list[i].mac_addr,
3356 &filter[num + i].macaddr, ETH_ADDR_LEN);
3357 req_list[i].vlan_tag =
3358 rte_cpu_to_le_16(filter[num + i].vlan_id);
3359 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3362 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3364 if (ret != I40E_SUCCESS) {
3365 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
3369 } while (num < total);
3376 /* Find a specific MAC filter */
3377 static struct i40e_mac_filter *
3378 i40e_find_mac_filter(struct i40e_vsi *vsi,
3379 struct ether_addr *macaddr)
3381 struct i40e_mac_filter *f;
3383 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3384 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3392 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3395 uint32_t vid_idx, vid_bit;
3397 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3398 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3400 if (vsi->vfta[vid_idx] & vid_bit)
3407 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3408 uint16_t vlan_id, bool on)
3410 uint32_t vid_idx, vid_bit;
3412 #define UINT32_BIT_MASK 0x1F
3413 #define VALID_VLAN_BIT_MASK 0xFFF
3414 /* VFTA is an array of 32-bit elements, each containing 32 vlan bits. Find
3415 * the element first, then the bit it belongs to
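* e.g. vlan_id 100: vid_idx = 100 >> 5 = 3, vid_bit = 1 << (100 & 0x1F)
* = 1 << 4, so bit 4 of vfta[3] represents vlan 100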
3417 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3419 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3422 vsi->vfta[vid_idx] |= vid_bit;
3424 vsi->vfta[vid_idx] &= ~vid_bit;
3428 * Find all vlan options for a specific mac addr, and
3429 * return with the vlans actually found.
3432 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3433 struct i40e_macvlan_filter *mv_f,
3434 int num, struct ether_addr *addr)
3440 * i40e_find_vlan_filter is not used here, to decrease the loop time,
3441 * although the code looks more complex.
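* Instead, the VFTA bitmap is walked directly: bit k set in word j
* means vlan id (j * 32 + k) is configured on this VSI.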
3443 if (num < vsi->vlan_num)
3444 return I40E_ERR_PARAM;
3447 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3449 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3450 if (vsi->vfta[j] & (1 << k)) {
3452 PMD_DRV_LOG(ERR, "vlan number "
3454 return I40E_ERR_PARAM;
3456 (void)rte_memcpy(&mv_f[i].macaddr,
3457 addr, ETH_ADDR_LEN);
3459 j * I40E_UINT32_BIT_SIZE + k;
3465 return I40E_SUCCESS;
3469 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3470 struct i40e_macvlan_filter *mv_f,
3475 struct i40e_mac_filter *f;
3477 if (num < vsi->mac_num)
3478 return I40E_ERR_PARAM;
3480 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3482 PMD_DRV_LOG(ERR, "buffer number doesn't match\n");
3483 return I40E_ERR_PARAM;
3485 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3486 mv_f[i].vlan_id = vlan;
3490 return I40E_SUCCESS;
3494 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3497 struct i40e_mac_filter *f;
3498 struct i40e_macvlan_filter *mv_f;
3499 int ret = I40E_SUCCESS;
3501 if (vsi == NULL || vsi->mac_num == 0)
3502 return I40E_ERR_PARAM;
3504 /* Case where no vlan is set */
3505 if (vsi->vlan_num == 0)
3508 num = vsi->mac_num * vsi->vlan_num;
3510 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3512 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3513 return I40E_ERR_NO_MEMORY;
3517 if (vsi->vlan_num == 0) {
3518 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3519 (void)rte_memcpy(&mv_f[i].macaddr,
3520 &f->macaddr, ETH_ADDR_LEN);
3521 mv_f[i].vlan_id = 0;
3525 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3526 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
3527 vsi->vlan_num, &f->macaddr);
3528 if (ret != I40E_SUCCESS)
3534 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3542 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3544 struct i40e_macvlan_filter *mv_f;
3546 int ret = I40E_SUCCESS;
3548 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3549 return I40E_ERR_PARAM;
3551 /* If it's already set, just return */
3552 if (i40e_find_vlan_filter(vsi, vlan))
3553 return I40E_SUCCESS;
3555 mac_num = vsi->mac_num;
3558 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3559 return I40E_ERR_PARAM;
3562 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3565 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3566 return I40E_ERR_NO_MEMORY;
3569 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3571 if (ret != I40E_SUCCESS)
3574 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3576 if (ret != I40E_SUCCESS)
3579 i40e_set_vlan_filter(vsi, vlan, 1);
3589 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3591 struct i40e_macvlan_filter *mv_f;
3593 int ret = I40E_SUCCESS;
3596 * Vlan 0 is the generic filter for untagged packets
3597 * and can't be removed.
3599 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3600 return I40E_ERR_PARAM;
3602 /* If can't find it, just return */
3603 if (!i40e_find_vlan_filter(vsi, vlan))
3604 return I40E_ERR_PARAM;
3606 mac_num = vsi->mac_num;
3609 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3610 return I40E_ERR_PARAM;
3613 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3616 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3617 return I40E_ERR_NO_MEMORY;
3620 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3622 if (ret != I40E_SUCCESS)
3625 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3627 if (ret != I40E_SUCCESS)
3630 /* This is the last vlan to remove, replace all mac filters with vlan 0 */
3631 if (vsi->vlan_num == 1) {
3632 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3633 if (ret != I40E_SUCCESS)
3636 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3637 if (ret != I40E_SUCCESS)
3641 i40e_set_vlan_filter(vsi, vlan, 0);
3651 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3653 struct i40e_mac_filter *f;
3654 struct i40e_macvlan_filter *mv_f;
3656 int ret = I40E_SUCCESS;
3658 /* If the MAC address was already configured, just return */
3659 f = i40e_find_mac_filter(vsi, addr);
3661 return I40E_SUCCESS;
3664 * If vlan_num is 0, this is the first time to add a mac,
3665 * so set the mask for vlan_id 0.
3667 if (vsi->vlan_num == 0) {
3668 i40e_set_vlan_filter(vsi, 0, 1);
3672 vlan_num = vsi->vlan_num;
3674 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3676 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3677 return I40E_ERR_NO_MEMORY;
3680 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3681 if (ret != I40E_SUCCESS)
3684 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3685 if (ret != I40E_SUCCESS)
3688 /* Add the mac addr into mac list */
3689 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3691 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3692 ret = I40E_ERR_NO_MEMORY;
3695 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3696 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3707 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3709 struct i40e_mac_filter *f;
3710 struct i40e_macvlan_filter *mv_f;
3712 int ret = I40E_SUCCESS;
3714 /* Can't find it, return an error */
3715 f = i40e_find_mac_filter(vsi, addr);
3717 return I40E_ERR_PARAM;
3719 vlan_num = vsi->vlan_num;
3720 if (vlan_num == 0) {
3721 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
3722 return I40E_ERR_PARAM;
3724 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3726 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3727 return I40E_ERR_NO_MEMORY;
3730 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3731 if (ret != I40E_SUCCESS)
3734 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3735 if (ret != I40E_SUCCESS)
3738 /* Remove the mac addr from the mac list */
3739 TAILQ_REMOVE(&vsi->mac_list, f, next);
3749 /* Configure hash enable flags for RSS */
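/*
 * Each ETH_RSS_* flag maps to one bit of the 64-bit HENA value,
 * indexed by the hardware packet classifier type (PCTYPE);
 * i40e_parse_hena() below performs the reverse translation.
 */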
3751 i40e_config_hena(uint64_t flags)
3758 if (flags & ETH_RSS_NONF_IPV4_UDP)
3759 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3760 if (flags & ETH_RSS_NONF_IPV4_TCP)
3761 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3762 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3763 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3764 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3765 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3766 if (flags & ETH_RSS_FRAG_IPV4)
3767 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3768 if (flags & ETH_RSS_NONF_IPV6_UDP)
3769 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3770 if (flags & ETH_RSS_NONF_IPV6_TCP)
3771 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3772 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3773 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3774 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3775 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3776 if (flags & ETH_RSS_FRAG_IPV6)
3777 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3778 if (flags & ETH_RSS_L2_PAYLOAD)
3779 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3784 /* Parse the hash enable flags */
3786 i40e_parse_hena(uint64_t flags)
3788 uint64_t rss_hf = 0;
3793 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3794 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3795 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3796 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3797 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3798 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3799 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3800 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3801 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3802 rss_hf |= ETH_RSS_FRAG_IPV4;
3803 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3804 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3805 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3806 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3807 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3808 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3809 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3810 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3811 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3812 rss_hf |= ETH_RSS_FRAG_IPV6;
3813 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3814 rss_hf |= ETH_RSS_L2_PAYLOAD;
3821 i40e_pf_disable_rss(struct i40e_pf *pf)
3823 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
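/* HENA is a 64-bit bitmap exposed as two 32-bit registers */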
3826 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3827 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3828 hena &= ~I40E_RSS_HENA_ALL;
3829 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3830 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3831 I40E_WRITE_FLUSH(hw);
3835 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
3838 uint8_t hash_key_len;
3843 hash_key = (uint32_t *)(rss_conf->rss_key);
3844 hash_key_len = rss_conf->rss_key_len;
3845 if (hash_key != NULL && hash_key_len >=
3846 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3847 /* Fill in RSS hash key */
3848 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3849 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
3852 rss_hf = rss_conf->rss_hf;
3853 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3854 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3855 hena &= ~I40E_RSS_HENA_ALL;
3856 hena |= i40e_config_hena(rss_hf);
3857 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3858 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3859 I40E_WRITE_FLUSH(hw);
3865 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
3866 struct rte_eth_rss_conf *rss_conf)
3868 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3869 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
3872 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3873 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3874 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
3875 if (rss_hf != 0) /* Enable RSS */
3877 return 0; /* Nothing to do */
3880 if (rss_hf == 0) /* Disable RSS */
3883 return i40e_hw_rss_hash_set(hw, rss_conf);
3887 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3888 struct rte_eth_rss_conf *rss_conf)
3890 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3891 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
3895 if (hash_key != NULL) {
3896 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3897 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
3898 rss_conf->rss_key_len = i * sizeof(uint32_t);
3900 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3901 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3902 rss_conf->rss_hf = i40e_parse_hena(hena);
3909 i40e_pf_config_rss(struct i40e_pf *pf)
3911 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3912 struct rte_eth_rss_conf rss_conf;
3913 uint32_t i, lut = 0;
3914 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
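/*
 * Spread the LUT entries round-robin over the first 'num' RX queues
 * (num was rounded down to a power of 2 above); four 8-bit entries
 * are packed into each 32-bit HLUT register.
 */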
3916 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
3919 lut = (lut << 8) | (j & ((0x1 <<
3920 hw->func_caps.rss_table_entry_width) - 1));
3922 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
3925 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
3926 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
3927 i40e_pf_disable_rss(pf);
3930 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
3931 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3932 /* Calculate the default hash key */
3933 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3934 rss_key_default[i] = (uint32_t)rte_rand();
3935 rss_conf.rss_key = (uint8_t *)rss_key_default;
3936 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3940 return i40e_hw_rss_hash_set(hw, &rss_conf);
3944 i40e_pf_config_mq_rx(struct i40e_pf *pf)
3946 if (!pf->dev_data->sriov.active) {
3947 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
3949 i40e_pf_config_rss(pf);
3952 i40e_pf_disable_rss(pf);