4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
52 #include "i40e_logs.h"
53 #include "i40e/i40e_register_x710_int.h"
54 #include "i40e/i40e_prototype.h"
55 #include "i40e/i40e_adminq_cmd.h"
56 #include "i40e/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
61 /* Maximum number of MAC addresses */
62 #define I40E_NUM_MACADDR_MAX 64
63 #define I40E_CLEAR_PXE_WAIT_MS 200
65 /* Maximum number of capability elements */
66 #define I40E_MAX_CAP_ELE_NUM 128
68 /* Wait count and interval */
69 #define I40E_CHK_Q_ENA_COUNT 1000
70 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
72 /* Maximum number of VSIs */
73 #define I40E_MAX_NUM_VSIS (384UL)
75 /* Bit shift and mask */
76 #define I40E_16_BIT_SHIFT 16
77 #define I40E_16_BIT_MASK 0xFFFF
78 #define I40E_32_BIT_SHIFT 32
79 #define I40E_32_BIT_MASK 0xFFFFFFFF
80 #define I40E_48_BIT_SHIFT 48
81 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
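/*
 * Illustrative sketch, assuming the usual low/high split of the 48-bit
 * statistics counters: these shift/mask macros let a 48-bit value be
 * stitched together from two 32-bit register reads, e.g. in a helper
 * such as i40e_stat_update_48() declared below (register names here are
 * placeholders):
 *
 *	uint64_t lo  = (uint64_t)I40E_READ_REG(hw, loreg);
 *	uint64_t hi  = (uint64_t)I40E_READ_REG(hw, hireg);
 *	uint64_t val = ((hi << I40E_32_BIT_SHIFT) | lo) & I40E_48_BIT_MASK;
 */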
83 /* Default queue interrupt throttling time in microseconds */
84 #define I40E_ITR_INDEX_DEFAULT 0
85 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
86 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
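/*
 * Example of the ITR conversion, a sketch based on the "each 1 represents
 * 2 us" comment in i40e_calc_itr_interval() further down: the interval is
 * configured in microseconds but programmed to hardware in 2 us units, so
 * a requested value is clamped and then halved before being written:
 *
 *	interval_us = I40E_QUEUE_ITR_INTERVAL_DEFAULT;	(32 us requested)
 *	hw_count    = interval_us / 2;			(16 written to the ITR field)
 */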
88 #define I40E_RSS_OFFLOAD_ALL ( \
89 ETH_RSS_NONF_IPV4_UDP | \
90 ETH_RSS_NONF_IPV4_TCP | \
91 ETH_RSS_NONF_IPV4_SCTP | \
92 ETH_RSS_NONF_IPV4_OTHER | \
94 ETH_RSS_NONF_IPV6_UDP | \
95 ETH_RSS_NONF_IPV6_TCP | \
96 ETH_RSS_NONF_IPV6_SCTP | \
97 ETH_RSS_NONF_IPV6_OTHER | \
101 /* All bits of RSS hash enable */
102 #define I40E_RSS_HENA_ALL ( \
103 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
104 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
105 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
106 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
107 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
108 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
109 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
110 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
111 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
112 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
113 (1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
114 (1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
115 (1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
116 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
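/*
 * Illustrative sketch, assuming the HENA bitmap is exposed as two 32-bit
 * registers (I40E_PFQF_HENA(0)/(1) in the base driver headers): enabling
 * RSS for all supported packet classifier types would look roughly like
 * this, splitting the 64-bit mask with the shift/mask macros above:
 *
 *	uint64_t hena = I40E_RSS_HENA_ALL;
 *	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0),
 *		       (uint32_t)(hena & I40E_32_BIT_MASK));
 *	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1),
 *		       (uint32_t)(hena >> I40E_32_BIT_SHIFT));
 */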
118 static int eth_i40e_dev_init(\
119 __attribute__((unused)) struct eth_driver *eth_drv,
120 struct rte_eth_dev *eth_dev);
121 static int i40e_dev_configure(struct rte_eth_dev *dev);
122 static int i40e_dev_start(struct rte_eth_dev *dev);
123 static void i40e_dev_stop(struct rte_eth_dev *dev);
124 static void i40e_dev_close(struct rte_eth_dev *dev);
125 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
126 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
127 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
128 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
129 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
130 struct rte_eth_stats *stats);
131 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
132 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
136 static void i40e_dev_info_get(struct rte_eth_dev *dev,
137 struct rte_eth_dev_info *dev_info);
138 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
141 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
142 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
143 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
146 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
147 static int i40e_dev_led_on(struct rte_eth_dev *dev);
148 static int i40e_dev_led_off(struct rte_eth_dev *dev);
149 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
150 struct rte_eth_fc_conf *fc_conf);
151 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
152 struct rte_eth_pfc_conf *pfc_conf);
153 static void i40e_macaddr_add(struct rte_eth_dev *dev,
154 struct ether_addr *mac_addr,
157 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
158 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
159 struct rte_eth_rss_reta *reta_conf);
160 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
161 struct rte_eth_rss_reta *reta_conf);
163 static int i40e_get_cap(struct i40e_hw *hw);
164 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
165 static int i40e_pf_setup(struct i40e_pf *pf);
166 static int i40e_vsi_init(struct i40e_vsi *vsi);
167 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
168 bool offset_loaded, uint64_t *offset, uint64_t *stat);
169 static void i40e_stat_update_48(struct i40e_hw *hw,
175 static void i40e_pf_config_irq0(struct i40e_hw *hw);
176 static void i40e_dev_interrupt_handler(
177 __rte_unused struct rte_intr_handle *handle, void *param);
178 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
179 uint32_t base, uint32_t num);
180 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
181 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
183 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
185 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
186 static int i40e_veb_release(struct i40e_veb *veb);
187 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
188 struct i40e_vsi *vsi);
189 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
190 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
191 static int i40e_pf_disable_all_queues(struct i40e_hw *hw);
192 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
193 struct i40e_macvlan_filter *mv_f,
195 struct ether_addr *addr);
196 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
197 struct i40e_macvlan_filter *mv_f,
200 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
201 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
202 struct rte_eth_rss_conf *rss_conf);
203 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
204 struct rte_eth_rss_conf *rss_conf);
206 /* Default hash key buffer for RSS */
207 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
209 static struct rte_pci_id pci_id_i40e_map[] = {
210 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
211 #include "rte_pci_dev_ids.h"
212 { .vendor_id = 0, /* sentinel */ },
215 static struct eth_dev_ops i40e_eth_dev_ops = {
216 .dev_configure = i40e_dev_configure,
217 .dev_start = i40e_dev_start,
218 .dev_stop = i40e_dev_stop,
219 .dev_close = i40e_dev_close,
220 .promiscuous_enable = i40e_dev_promiscuous_enable,
221 .promiscuous_disable = i40e_dev_promiscuous_disable,
222 .allmulticast_enable = i40e_dev_allmulticast_enable,
223 .allmulticast_disable = i40e_dev_allmulticast_disable,
224 .link_update = i40e_dev_link_update,
225 .stats_get = i40e_dev_stats_get,
226 .stats_reset = i40e_dev_stats_reset,
227 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
228 .dev_infos_get = i40e_dev_info_get,
229 .vlan_filter_set = i40e_vlan_filter_set,
230 .vlan_tpid_set = i40e_vlan_tpid_set,
231 .vlan_offload_set = i40e_vlan_offload_set,
232 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
233 .vlan_pvid_set = i40e_vlan_pvid_set,
234 .rx_queue_setup = i40e_dev_rx_queue_setup,
235 .rx_queue_release = i40e_dev_rx_queue_release,
236 .rx_queue_count = i40e_dev_rx_queue_count,
237 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
238 .tx_queue_setup = i40e_dev_tx_queue_setup,
239 .tx_queue_release = i40e_dev_tx_queue_release,
240 .dev_led_on = i40e_dev_led_on,
241 .dev_led_off = i40e_dev_led_off,
242 .flow_ctrl_set = i40e_flow_ctrl_set,
243 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
244 .mac_addr_add = i40e_macaddr_add,
245 .mac_addr_remove = i40e_macaddr_remove,
246 .reta_update = i40e_dev_rss_reta_update,
247 .reta_query = i40e_dev_rss_reta_query,
248 .rss_hash_update = i40e_dev_rss_hash_update,
249 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
252 static struct eth_driver rte_i40e_pmd = {
254 .name = "rte_i40e_pmd",
255 .id_table = pci_id_i40e_map,
256 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
258 .eth_dev_init = eth_i40e_dev_init,
259 .dev_private_size = sizeof(struct i40e_adapter),
263 i40e_prev_power_of_2(int n)
281 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
282 struct rte_eth_link *link)
284 struct rte_eth_link *dst = link;
285 struct rte_eth_link *src = &(dev->data->dev_link);
287 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
288 *(uint64_t *)src) == 0)
295 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
296 struct rte_eth_link *link)
298 struct rte_eth_link *dst = &(dev->data->dev_link);
299 struct rte_eth_link *src = link;
301 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
302 *(uint64_t *)src) == 0)
309 * Driver initialization routine.
310 * Invoked once at EAL init time.
311 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
314 rte_i40e_pmd_init(const char *name __rte_unused,
315 const char *params __rte_unused)
317 PMD_INIT_FUNC_TRACE();
318 rte_eth_driver_register(&rte_i40e_pmd);
323 static struct rte_driver rte_i40e_driver = {
325 .init = rte_i40e_pmd_init,
328 PMD_REGISTER_DRIVER(rte_i40e_driver);
331 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
332 struct rte_eth_dev *dev)
334 struct rte_pci_device *pci_dev;
335 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
336 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
337 struct i40e_vsi *vsi;
342 PMD_INIT_FUNC_TRACE();
344 dev->dev_ops = &i40e_eth_dev_ops;
345 dev->rx_pkt_burst = i40e_recv_pkts;
346 dev->tx_pkt_burst = i40e_xmit_pkts;
348 /* for secondary processes, we don't initialise any further as primary
349 * has already done this work. Only check we don't need a different RX function */
351 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
352 if (dev->data->scattered_rx)
353 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
356 pci_dev = dev->pci_dev;
357 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
358 pf->adapter->eth_dev = dev;
359 pf->dev_data = dev->data;
361 hw->back = I40E_PF_TO_ADAPTER(pf);
362 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
364 PMD_INIT_LOG(ERR, "Hardware is not available, "
365 "as address is NULL\n");
369 hw->vendor_id = pci_dev->id.vendor_id;
370 hw->device_id = pci_dev->id.device_id;
371 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
372 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
373 hw->bus.device = pci_dev->addr.devid;
374 hw->bus.func = pci_dev->addr.function;
376 /* Disable all queues before PF reset, as required */
377 ret = i40e_pf_disable_all_queues(hw);
378 if (ret != I40E_SUCCESS) {
379 PMD_INIT_LOG(ERR, "Failed to disable queues %u\n", ret);
383 /* Reset here to make sure all is clean for each PF */
384 ret = i40e_pf_reset(hw);
386 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
390 /* Initialize the shared code */
391 ret = i40e_init_shared_code(hw);
393 PMD_INIT_LOG(ERR, "Failed to init shared code: %d", ret);
397 /* Initialize the parameters for adminq */
398 i40e_init_adminq_parameter(hw);
399 ret = i40e_init_adminq(hw);
400 if (ret != I40E_SUCCESS) {
401 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
404 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
405 "%02d.%02d.%02d eetrack %04x\n",
406 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
407 hw->aq.api_maj_ver, hw->aq.api_min_ver,
408 ((hw->nvm.version >> 12) & 0xf),
409 ((hw->nvm.version >> 4) & 0xff),
410 (hw->nvm.version & 0xf), hw->nvm.eetrack);
413 ret = i40e_aq_stop_lldp(hw, true, NULL);
414 if (ret != I40E_SUCCESS) /* Its failure can be ignored */
415 PMD_INIT_LOG(INFO, "Failed to stop lldp\n");
418 i40e_clear_pxe_mode(hw);
420 /* Get hw capabilities */
421 ret = i40e_get_cap(hw);
422 if (ret != I40E_SUCCESS) {
423 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
424 goto err_get_capabilities;
427 /* Initialize parameters for PF */
428 ret = i40e_pf_parameter_init(dev);
430 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
431 goto err_parameter_init;
434 /* Initialize the queue management */
435 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
437 PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
438 goto err_qp_pool_init;
440 ret = i40e_res_pool_init(&pf->msix_pool, 1,
441 hw->func_caps.num_msix_vectors - 1);
443 PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
444 goto err_msix_pool_init;
447 /* Initialize lan hmc */
448 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
449 hw->func_caps.num_rx_qp, 0, 0);
450 if (ret != I40E_SUCCESS) {
451 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
452 goto err_init_lan_hmc;
455 /* Configure lan hmc */
456 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
457 if (ret != I40E_SUCCESS) {
458 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
459 goto err_configure_lan_hmc;
462 /* Get and check the mac address */
463 i40e_get_mac_addr(hw, hw->mac.addr);
464 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
465 PMD_INIT_LOG(ERR, "mac address is not valid");
467 goto err_get_mac_addr;
469 /* Copy the permanent MAC address */
470 ether_addr_copy((struct ether_addr *) hw->mac.addr,
471 (struct ether_addr *) hw->mac.perm_addr);
473 /* Disable flow control */
474 hw->fc.requested_mode = I40E_FC_NONE;
475 i40e_set_fc(hw, &aq_fail, TRUE);
477 /* PF setup, which includes VSI setup */
478 ret = i40e_pf_setup(pf);
480 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
481 goto err_setup_pf_switch;
485 if (!vsi->max_macaddrs)
486 len = ETHER_ADDR_LEN;
488 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
490 /* Should be done after the VSI is initialized */
491 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
492 if (!dev->data->mac_addrs) {
493 PMD_INIT_LOG(ERR, "Failed to allocated memory "
494 "for storing mac address");
495 goto err_get_mac_addr;
497 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
498 &dev->data->mac_addrs[0]);
500 /* initialize pf host driver to setup SRIOV resource if applicable */
501 i40e_pf_host_init(dev);
503 /* register callback func to eal lib */
504 rte_intr_callback_register(&(pci_dev->intr_handle),
505 i40e_dev_interrupt_handler, (void *)dev);
507 /* configure and enable device interrupt */
508 i40e_pf_config_irq0(hw);
509 i40e_pf_enable_irq0(hw);
511 /* enable uio intr after callback register */
512 rte_intr_enable(&(pci_dev->intr_handle));
517 rte_free(pf->main_vsi);
519 err_configure_lan_hmc:
520 (void)i40e_shutdown_lan_hmc(hw);
522 i40e_res_pool_destroy(&pf->msix_pool);
524 i40e_res_pool_destroy(&pf->qp_pool);
527 err_get_capabilities:
528 (void)i40e_shutdown_adminq(hw);
534 i40e_dev_configure(struct rte_eth_dev *dev)
536 return i40e_dev_init_vlan(dev);
540 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
542 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
543 uint16_t msix_vect = vsi->msix_intr;
546 for (i = 0; i < vsi->nb_qps; i++) {
547 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
548 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
552 if (vsi->type != I40E_VSI_SRIOV) {
553 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
554 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
558 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
559 vsi->user_param + (msix_vect - 1);
561 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
563 I40E_WRITE_FLUSH(hw);
566 static inline uint16_t
567 i40e_calc_itr_interval(int16_t interval)
569 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
570 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
572 /* Convert to hardware count, as writing each 1 represents 2 us */
577 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
580 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
581 uint16_t msix_vect = vsi->msix_intr;
582 uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
585 for (i = 0; i < vsi->nb_qps; i++)
586 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
588 /* Bind all RX queues to allocated MSIX interrupt */
589 for (i = 0; i < vsi->nb_qps; i++) {
590 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
591 (interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
592 ((vsi->base_queue + i + 1) <<
593 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
594 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
595 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
597 if (i == vsi->nb_qps - 1)
598 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
599 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
602 /* Write the first RX queue to the linked list register as the head element */
603 if (vsi->type != I40E_VSI_SRIOV) {
604 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
605 (vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
606 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
608 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
609 msix_vect - 1), interval);
611 /* Disable auto-mask on enabling of all non-zero interrupts */
612 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
613 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
617 /* num_msix_vectors_vf needs to exclude irq0 */
618 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
619 vsi->user_param + (msix_vect - 1);
621 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
622 (vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
623 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
626 I40E_WRITE_FLUSH(hw);
630 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
632 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
633 uint16_t interval = i40e_calc_itr_interval(\
634 RTE_LIBRTE_I40E_ITR_INTERVAL);
636 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
637 I40E_PFINT_DYN_CTLN_INTENA_MASK |
638 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
639 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
640 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
644 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
646 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
648 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
652 i40e_dev_start(struct rte_eth_dev *dev)
654 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
655 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
656 struct i40e_vsi *vsi = pf->main_vsi;
660 ret = i40e_vsi_init(vsi);
661 if (ret != I40E_SUCCESS) {
662 PMD_DRV_LOG(ERR, "Failed to init VSI\n");
666 /* Map queues with MSIX interrupt */
667 i40e_vsi_queues_bind_intr(vsi);
668 i40e_vsi_enable_queues_intr(vsi);
670 /* Enable all queues which have been configured */
671 ret = i40e_vsi_switch_queues(vsi, TRUE);
672 if (ret != I40E_SUCCESS) {
673 PMD_DRV_LOG(ERR, "Failed to enable VSI\n");
677 /* Enable receiving broadcast packets */
678 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
679 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
680 if (ret != I40E_SUCCESS)
681 PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
687 i40e_vsi_switch_queues(vsi, FALSE);
688 i40e_dev_clear_queues(dev);
694 i40e_dev_stop(struct rte_eth_dev *dev)
696 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
697 struct i40e_vsi *vsi = pf->main_vsi;
699 /* Disable all queues */
700 i40e_vsi_switch_queues(vsi, FALSE);
702 /* Clear all queues and release memory */
703 i40e_dev_clear_queues(dev);
705 /* un-map queues with interrupt registers */
706 i40e_vsi_disable_queues_intr(vsi);
707 i40e_vsi_queues_unbind_intr(vsi);
711 i40e_dev_close(struct rte_eth_dev *dev)
713 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
714 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
717 PMD_INIT_FUNC_TRACE();
721 /* Disable interrupt */
722 i40e_pf_disable_irq0(hw);
723 rte_intr_disable(&(dev->pci_dev->intr_handle));
725 /* shutdown and destroy the HMC */
726 i40e_shutdown_lan_hmc(hw);
728 /* release all the existing VSIs and VEBs */
729 i40e_vsi_release(pf->main_vsi);
731 /* shutdown the adminq */
732 i40e_aq_queue_shutdown(hw, true);
733 i40e_shutdown_adminq(hw);
735 i40e_res_pool_destroy(&pf->qp_pool);
736 i40e_res_pool_destroy(&pf->msix_pool);
738 /* force a PF reset to clean anything leftover */
739 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
740 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
741 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
742 I40E_WRITE_FLUSH(hw);
746 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
748 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
749 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
750 struct i40e_vsi *vsi = pf->main_vsi;
753 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
755 if (status != I40E_SUCCESS)
756 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");
760 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
762 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
763 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
764 struct i40e_vsi *vsi = pf->main_vsi;
767 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
769 if (status != I40E_SUCCESS)
770 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");
774 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
776 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
777 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
778 struct i40e_vsi *vsi = pf->main_vsi;
781 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
782 if (ret != I40E_SUCCESS)
783 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
787 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
789 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
790 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
791 struct i40e_vsi *vsi = pf->main_vsi;
794 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
795 vsi->seid, FALSE, NULL);
796 if (ret != I40E_SUCCESS)
797 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
801 i40e_dev_link_update(struct rte_eth_dev *dev,
802 __rte_unused int wait_to_complete)
804 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
805 struct i40e_link_status link_status;
806 struct rte_eth_link link, old;
809 memset(&link, 0, sizeof(link));
810 memset(&old, 0, sizeof(old));
811 memset(&link_status, 0, sizeof(link_status));
812 rte_i40e_dev_atomic_read_link_status(dev, &old);
814 /* Get link status information from hardware */
815 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
816 if (status != I40E_SUCCESS) {
817 link.link_speed = ETH_LINK_SPEED_100;
818 link.link_duplex = ETH_LINK_FULL_DUPLEX;
819 PMD_DRV_LOG(ERR, "Failed to get link info\n");
823 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
825 if (!link.link_status)
828 /* i40e uses full duplex only */
829 link.link_duplex = ETH_LINK_FULL_DUPLEX;
831 /* Parse the link status */
832 switch (link_status.link_speed) {
833 case I40E_LINK_SPEED_100MB:
834 link.link_speed = ETH_LINK_SPEED_100;
836 case I40E_LINK_SPEED_1GB:
837 link.link_speed = ETH_LINK_SPEED_1000;
839 case I40E_LINK_SPEED_10GB:
840 link.link_speed = ETH_LINK_SPEED_10G;
842 case I40E_LINK_SPEED_20GB:
843 link.link_speed = ETH_LINK_SPEED_20G;
845 case I40E_LINK_SPEED_40GB:
846 link.link_speed = ETH_LINK_SPEED_40G;
849 link.link_speed = ETH_LINK_SPEED_100;
854 rte_i40e_dev_atomic_write_link_status(dev, &link);
855 if (link.link_status == old.link_status)
861 /* Get all the statistics of a VSI */
863 i40e_update_vsi_stats(struct i40e_vsi *vsi)
865 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
866 struct i40e_eth_stats *nes = &vsi->eth_stats;
867 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
868 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
870 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
871 vsi->offset_loaded, &oes->rx_bytes,
873 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
874 vsi->offset_loaded, &oes->rx_unicast,
876 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
877 vsi->offset_loaded, &oes->rx_multicast,
879 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
880 vsi->offset_loaded, &oes->rx_broadcast,
882 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
883 &oes->rx_discards, &nes->rx_discards);
884 /* GLV_REPC not supported */
885 /* GLV_RMPC not supported */
886 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
887 &oes->rx_unknown_protocol,
888 &nes->rx_unknown_protocol);
889 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
890 vsi->offset_loaded, &oes->tx_bytes,
892 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
893 vsi->offset_loaded, &oes->tx_unicast,
895 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
896 vsi->offset_loaded, &oes->tx_multicast,
898 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
899 vsi->offset_loaded, &oes->tx_broadcast,
901 /* GLV_TDPC not supported */
902 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
903 &oes->tx_errors, &nes->tx_errors);
904 vsi->offset_loaded = true;
906 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
907 printf("***************** VSI[%u] stats start *******************\n",
909 printf("rx_bytes: %lu\n", nes->rx_bytes);
910 printf("rx_unicast: %lu\n", nes->rx_unicast);
911 printf("rx_multicast: %lu\n", nes->rx_multicast);
912 printf("rx_broadcast: %lu\n", nes->rx_broadcast);
913 printf("rx_discards: %lu\n", nes->rx_discards);
914 printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
915 printf("tx_bytes: %lu\n", nes->tx_bytes);
916 printf("tx_unicast: %lu\n", nes->tx_unicast);
917 printf("tx_multicast: %lu\n", nes->tx_multicast);
918 printf("tx_broadcast: %lu\n", nes->tx_broadcast);
919 printf("tx_discards: %lu\n", nes->tx_discards);
920 printf("tx_errors: %lu\n", nes->tx_errors);
921 printf("***************** VSI[%u] stats end *******************\n",
923 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
926 /* Get all statistics of a port */
928 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
931 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
932 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
934 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
936 /* Get statistics of struct i40e_eth_stats */
937 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
938 I40E_GLPRT_GORCL(hw->port),
939 pf->offset_loaded, &os->eth.rx_bytes,
941 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
942 I40E_GLPRT_UPRCL(hw->port),
943 pf->offset_loaded, &os->eth.rx_unicast,
944 &ns->eth.rx_unicast);
945 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
946 I40E_GLPRT_MPRCL(hw->port),
947 pf->offset_loaded, &os->eth.rx_multicast,
948 &ns->eth.rx_multicast);
949 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
950 I40E_GLPRT_BPRCL(hw->port),
951 pf->offset_loaded, &os->eth.rx_broadcast,
952 &ns->eth.rx_broadcast);
953 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
954 pf->offset_loaded, &os->eth.rx_discards,
955 &ns->eth.rx_discards);
956 /* GLPRT_REPC not supported */
957 /* GLPRT_RMPC not supported */
958 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
960 &os->eth.rx_unknown_protocol,
961 &ns->eth.rx_unknown_protocol);
962 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
963 I40E_GLPRT_GOTCL(hw->port),
964 pf->offset_loaded, &os->eth.tx_bytes,
966 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
967 I40E_GLPRT_UPTCL(hw->port),
968 pf->offset_loaded, &os->eth.tx_unicast,
969 &ns->eth.tx_unicast);
970 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
971 I40E_GLPRT_MPTCL(hw->port),
972 pf->offset_loaded, &os->eth.tx_multicast,
973 &ns->eth.tx_multicast);
974 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
975 I40E_GLPRT_BPTCL(hw->port),
976 pf->offset_loaded, &os->eth.tx_broadcast,
977 &ns->eth.tx_broadcast);
978 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
979 pf->offset_loaded, &os->eth.tx_discards,
980 &ns->eth.tx_discards);
981 /* GLPRT_TEPC not supported */
983 /* additional port specific stats */
984 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
985 pf->offset_loaded, &os->tx_dropped_link_down,
986 &ns->tx_dropped_link_down);
987 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
988 pf->offset_loaded, &os->crc_errors,
990 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
991 pf->offset_loaded, &os->illegal_bytes,
993 /* GLPRT_ERRBC not supported */
994 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
995 pf->offset_loaded, &os->mac_local_faults,
996 &ns->mac_local_faults);
997 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
998 pf->offset_loaded, &os->mac_remote_faults,
999 &ns->mac_remote_faults);
1000 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1001 pf->offset_loaded, &os->rx_length_errors,
1002 &ns->rx_length_errors);
1003 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1004 pf->offset_loaded, &os->link_xon_rx,
1006 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1007 pf->offset_loaded, &os->link_xoff_rx,
1009 for (i = 0; i < 8; i++) {
1010 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1012 &os->priority_xon_rx[i],
1013 &ns->priority_xon_rx[i]);
1014 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1016 &os->priority_xoff_rx[i],
1017 &ns->priority_xoff_rx[i]);
1019 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1020 pf->offset_loaded, &os->link_xon_tx,
1022 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1023 pf->offset_loaded, &os->link_xoff_tx,
1025 for (i = 0; i < 8; i++) {
1026 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1028 &os->priority_xon_tx[i],
1029 &ns->priority_xon_tx[i]);
1030 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1032 &os->priority_xoff_tx[i],
1033 &ns->priority_xoff_tx[i]);
1034 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1036 &os->priority_xon_2_xoff[i],
1037 &ns->priority_xon_2_xoff[i]);
1039 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1040 I40E_GLPRT_PRC64L(hw->port),
1041 pf->offset_loaded, &os->rx_size_64,
1043 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1044 I40E_GLPRT_PRC127L(hw->port),
1045 pf->offset_loaded, &os->rx_size_127,
1047 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1048 I40E_GLPRT_PRC255L(hw->port),
1049 pf->offset_loaded, &os->rx_size_255,
1051 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1052 I40E_GLPRT_PRC511L(hw->port),
1053 pf->offset_loaded, &os->rx_size_511,
1055 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1056 I40E_GLPRT_PRC1023L(hw->port),
1057 pf->offset_loaded, &os->rx_size_1023,
1059 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1060 I40E_GLPRT_PRC1522L(hw->port),
1061 pf->offset_loaded, &os->rx_size_1522,
1063 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1064 I40E_GLPRT_PRC9522L(hw->port),
1065 pf->offset_loaded, &os->rx_size_big,
1067 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1068 pf->offset_loaded, &os->rx_undersize,
1070 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1071 pf->offset_loaded, &os->rx_fragments,
1073 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1074 pf->offset_loaded, &os->rx_oversize,
1076 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1077 pf->offset_loaded, &os->rx_jabber,
1079 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1080 I40E_GLPRT_PTC64L(hw->port),
1081 pf->offset_loaded, &os->tx_size_64,
1083 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1084 I40E_GLPRT_PTC127L(hw->port),
1085 pf->offset_loaded, &os->tx_size_127,
1087 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1088 I40E_GLPRT_PTC255L(hw->port),
1089 pf->offset_loaded, &os->tx_size_255,
1091 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1092 I40E_GLPRT_PTC511L(hw->port),
1093 pf->offset_loaded, &os->tx_size_511,
1095 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1096 I40E_GLPRT_PTC1023L(hw->port),
1097 pf->offset_loaded, &os->tx_size_1023,
1099 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1100 I40E_GLPRT_PTC1522L(hw->port),
1101 pf->offset_loaded, &os->tx_size_1522,
1103 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1104 I40E_GLPRT_PTC9522L(hw->port),
1105 pf->offset_loaded, &os->tx_size_big,
1107 /* GLPRT_MSPDC not supported */
1108 /* GLPRT_XEC not supported */
1110 pf->offset_loaded = true;
1112 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1113 ns->eth.rx_broadcast;
1114 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1115 ns->eth.tx_broadcast;
1116 stats->ibytes = ns->eth.rx_bytes;
1117 stats->obytes = ns->eth.tx_bytes;
1118 stats->oerrors = ns->eth.tx_errors;
1119 stats->imcasts = ns->eth.rx_multicast;
1122 i40e_update_vsi_stats(pf->main_vsi);
1124 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
1125 printf("***************** PF stats start *******************\n");
1126 printf("rx_bytes: %lu\n", ns->eth.rx_bytes);
1127 printf("rx_unicast: %lu\n", ns->eth.rx_unicast);
1128 printf("rx_multicast: %lu\n", ns->eth.rx_multicast);
1129 printf("rx_broadcast: %lu\n", ns->eth.rx_broadcast);
1130 printf("rx_discards: %lu\n", ns->eth.rx_discards);
1131 printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
1132 printf("tx_bytes: %lu\n", ns->eth.tx_bytes);
1133 printf("tx_unicast: %lu\n", ns->eth.tx_unicast);
1134 printf("tx_multicast: %lu\n", ns->eth.tx_multicast);
1135 printf("tx_broadcast: %lu\n", ns->eth.tx_broadcast);
1136 printf("tx_discards: %lu\n", ns->eth.tx_discards);
1137 printf("tx_errors: %lu\n", ns->eth.tx_errors);
1139 printf("tx_dropped_link_down: %lu\n", ns->tx_dropped_link_down);
1140 printf("crc_errors: %lu\n", ns->crc_errors);
1141 printf("illegal_bytes: %lu\n", ns->illegal_bytes);
1142 printf("error_bytes: %lu\n", ns->error_bytes);
1143 printf("mac_local_faults: %lu\n", ns->mac_local_faults);
1144 printf("mac_remote_faults: %lu\n", ns->mac_remote_faults);
1145 printf("rx_length_errors: %lu\n", ns->rx_length_errors);
1146 printf("link_xon_rx: %lu\n", ns->link_xon_rx);
1147 printf("link_xoff_rx: %lu\n", ns->link_xoff_rx);
1148 for (i = 0; i < 8; i++) {
1149 printf("priority_xon_rx[%d]: %lu\n",
1150 i, ns->priority_xon_rx[i]);
1151 printf("priority_xoff_rx[%d]: %lu\n",
1152 i, ns->priority_xoff_rx[i]);
1154 printf("link_xon_tx: %lu\n", ns->link_xon_tx);
1155 printf("link_xoff_tx: %lu\n", ns->link_xoff_tx);
1156 for (i = 0; i < 8; i++) {
1157 printf("priority_xon_tx[%d]: %lu\n",
1158 i, ns->priority_xon_tx[i]);
1159 printf("priority_xoff_tx[%d]: %lu\n",
1160 i, ns->priority_xoff_tx[i]);
1161 printf("priority_xon_2_xoff[%d]: %lu\n",
1162 i, ns->priority_xon_2_xoff[i]);
1164 printf("rx_size_64: %lu\n", ns->rx_size_64);
1165 printf("rx_size_127: %lu\n", ns->rx_size_127);
1166 printf("rx_size_255: %lu\n", ns->rx_size_255);
1167 printf("rx_size_511: %lu\n", ns->rx_size_511);
1168 printf("rx_size_1023: %lu\n", ns->rx_size_1023);
1169 printf("rx_size_1522: %lu\n", ns->rx_size_1522);
1170 printf("rx_size_big: %lu\n", ns->rx_size_big);
1171 printf("rx_undersize: %lu\n", ns->rx_undersize);
1172 printf("rx_fragments: %lu\n", ns->rx_fragments);
1173 printf("rx_oversize: %lu\n", ns->rx_oversize);
1174 printf("rx_jabber: %lu\n", ns->rx_jabber);
1175 printf("tx_size_64: %lu\n", ns->tx_size_64);
1176 printf("tx_size_127: %lu\n", ns->tx_size_127);
1177 printf("tx_size_255: %lu\n", ns->tx_size_255);
1178 printf("tx_size_511: %lu\n", ns->tx_size_511);
1179 printf("tx_size_1023: %lu\n", ns->tx_size_1023);
1180 printf("tx_size_1522: %lu\n", ns->tx_size_1522);
1181 printf("tx_size_big: %lu\n", ns->tx_size_big);
1182 printf("mac_short_packet_dropped: %lu\n",
1183 ns->mac_short_packet_dropped);
1184 printf("checksum_error: %lu\n", ns->checksum_error);
1185 printf("***************** PF stats end ********************\n");
1186 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
1189 /* Reset the statistics */
1191 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1193 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1195 /* It results in reloading the start point of each counter */
1196 pf->offset_loaded = false;
1200 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1201 __rte_unused uint16_t queue_id,
1202 __rte_unused uint8_t stat_idx,
1203 __rte_unused uint8_t is_rx)
1205 PMD_INIT_FUNC_TRACE();
1211 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1213 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1214 struct i40e_vsi *vsi = pf->main_vsi;
1216 dev_info->max_rx_queues = vsi->nb_qps;
1217 dev_info->max_tx_queues = vsi->nb_qps;
1218 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1219 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1220 dev_info->max_mac_addrs = vsi->max_macaddrs;
1221 dev_info->max_vfs = dev->pci_dev->max_vfs;
1222 dev_info->rx_offload_capa =
1223 DEV_RX_OFFLOAD_VLAN_STRIP |
1224 DEV_RX_OFFLOAD_IPV4_CKSUM |
1225 DEV_RX_OFFLOAD_UDP_CKSUM |
1226 DEV_RX_OFFLOAD_TCP_CKSUM;
1227 dev_info->tx_offload_capa =
1228 DEV_TX_OFFLOAD_VLAN_INSERT |
1229 DEV_TX_OFFLOAD_IPV4_CKSUM |
1230 DEV_TX_OFFLOAD_UDP_CKSUM |
1231 DEV_TX_OFFLOAD_TCP_CKSUM |
1232 DEV_TX_OFFLOAD_SCTP_CKSUM;
1236 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1238 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1239 struct i40e_vsi *vsi = pf->main_vsi;
1240 PMD_INIT_FUNC_TRACE();
1243 return i40e_vsi_add_vlan(vsi, vlan_id);
1245 return i40e_vsi_delete_vlan(vsi, vlan_id);
1249 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1250 __rte_unused uint16_t tpid)
1252 PMD_INIT_FUNC_TRACE();
1256 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1258 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1259 struct i40e_vsi *vsi = pf->main_vsi;
1261 if (mask & ETH_VLAN_STRIP_MASK) {
1262 /* Enable or disable VLAN stripping */
1263 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1264 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1266 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1269 if (mask & ETH_VLAN_EXTEND_MASK) {
1270 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1271 i40e_vsi_config_double_vlan(vsi, TRUE);
1273 i40e_vsi_config_double_vlan(vsi, FALSE);
1278 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1279 __rte_unused uint16_t queue,
1280 __rte_unused int on)
1282 PMD_INIT_FUNC_TRACE();
1286 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1288 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1289 struct i40e_vsi *vsi = pf->main_vsi;
1290 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1291 struct i40e_vsi_vlan_pvid_info info;
1293 memset(&info, 0, sizeof(info));
1296 info.config.pvid = pvid;
1298 info.config.reject.tagged =
1299 data->dev_conf.txmode.hw_vlan_reject_tagged;
1300 info.config.reject.untagged =
1301 data->dev_conf.txmode.hw_vlan_reject_untagged;
1304 return i40e_vsi_vlan_pvid_set(vsi, &info);
1308 i40e_dev_led_on(struct rte_eth_dev *dev)
1310 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1311 uint32_t mode = i40e_led_get(hw);
1314 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1320 i40e_dev_led_off(struct rte_eth_dev *dev)
1322 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1323 uint32_t mode = i40e_led_get(hw);
1326 i40e_led_set(hw, 0, false);
1332 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1333 __rte_unused struct rte_eth_fc_conf *fc_conf)
1335 PMD_INIT_FUNC_TRACE();
1341 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1342 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1344 PMD_INIT_FUNC_TRACE();
1349 /* Add a MAC address, and update filters */
1351 i40e_macaddr_add(struct rte_eth_dev *dev,
1352 struct ether_addr *mac_addr,
1353 __attribute__((unused)) uint32_t index,
1354 __attribute__((unused)) uint32_t pool)
1356 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1357 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1358 struct i40e_vsi *vsi = pf->main_vsi;
1359 struct ether_addr old_mac;
1362 if (!is_valid_assigned_ether_addr(mac_addr)) {
1363 PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
1367 if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1368 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");
1372 /* Write mac address */
1373 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1374 mac_addr->addr_bytes, NULL);
1375 if (ret != I40E_SUCCESS) {
1376 PMD_DRV_LOG(ERR, "Failed to write mac address\n");
1380 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1381 (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1384 ret = i40e_vsi_add_mac(vsi, mac_addr);
1385 if (ret != I40E_SUCCESS) {
1386 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
1390 ether_addr_copy(mac_addr, &pf->dev_addr);
1391 i40e_vsi_delete_mac(vsi, &old_mac);
1394 /* Remove a MAC address, and update filters */
1396 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1398 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1399 struct i40e_vsi *vsi = pf->main_vsi;
1400 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1401 struct ether_addr *macaddr;
1403 struct i40e_hw *hw =
1404 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1406 if (index >= vsi->max_macaddrs)
1409 macaddr = &(data->mac_addrs[index]);
1410 if (!is_valid_assigned_ether_addr(macaddr))
1413 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1414 hw->mac.perm_addr, NULL);
1415 if (ret != I40E_SUCCESS) {
1416 PMD_DRV_LOG(ERR, "Failed to write mac address\n");
1420 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
1422 ret = i40e_vsi_delete_mac(vsi, macaddr);
1423 if (ret != I40E_SUCCESS)
1426 /* Clear device address as it has been removed */
1427 if (is_same_ether_addr(&(pf->dev_addr), macaddr))
1428 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1432 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1433 struct rte_eth_rss_reta *reta_conf)
1435 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1437 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1439 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1441 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1443 mask = (uint8_t)((reta_conf->mask_hi >>
1452 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1454 for (j = 0, lut = 0; j < 4; j++) {
1455 if (mask & (0x1 << j))
1456 lut |= reta_conf->reta[i + j] << (8 * j);
1458 lut |= l & (0xFF << (8 * j));
1460 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1467 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1468 struct rte_eth_rss_reta *reta_conf)
1470 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1472 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1474 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1476 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1478 mask = (uint8_t)((reta_conf->mask_hi >>
1484 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1485 for (j = 0; j < 4; j++) {
1486 if (mask & (0x1 << j))
1487 reta_conf->reta[i + j] =
1488 (uint8_t)((lut >> (8 * j)) & 0xFF);
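/*
 * Added note on the RETA layout: each I40E_PFQF_HLUT register packs four
 * 8-bit redirection table entries, which is why the update/query loops
 * step by 4 and use (i >> 2) as the register index. For example, RETA
 * entries 4..7 all live in I40E_PFQF_HLUT(1), with entry 5 occupying
 * bits 15:8 of that register.
 */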
1496 * i40e_allocate_dma_mem_d - specific memory alloc for shared code
1497 * @hw: pointer to the HW structure
1498 * @mem: pointer to mem struct to fill out
1499 * @size: size of memory requested
1500 * @alignment: what to align the allocation to
1502 enum i40e_status_code
1503 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1504 struct i40e_dma_mem *mem,
1508 static uint64_t id = 0;
1509 const struct rte_memzone *mz = NULL;
1510 char z_name[RTE_MEMZONE_NAMESIZE];
1513 return I40E_ERR_PARAM;
1516 rte_snprintf(z_name, sizeof(z_name), "i40e_dma_%lu", id);
1517 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1519 return I40E_ERR_NO_MEMORY;
1524 mem->pa = mz->phys_addr;
1526 return I40E_SUCCESS;
1530 * i40e_free_dma_mem_d - specific memory free for shared code
1531 * @hw: pointer to the HW structure
1532 * @mem: ptr to mem struct to free
1534 enum i40e_status_code
1535 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1536 struct i40e_dma_mem *mem)
1538 if (!mem || !mem->va)
1539 return I40E_ERR_PARAM;
1544 return I40E_SUCCESS;
1548 * i40e_allocate_virt_mem_d - specific memory alloc for shared code
1549 * @hw: pointer to the HW structure
1550 * @mem: pointer to mem struct to fill out
1551 * @size: size of memory requested
1553 enum i40e_status_code
1554 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1555 struct i40e_virt_mem *mem,
1559 return I40E_ERR_PARAM;
1562 mem->va = rte_zmalloc("i40e", size, 0);
1565 return I40E_SUCCESS;
1567 return I40E_ERR_NO_MEMORY;
1571 * i40e_free_virt_mem_d - specific memory free for shared code
1572 * @hw: pointer to the HW structure
1573 * @mem: pointer to mem struct to free
1575 enum i40e_status_code
1576 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1577 struct i40e_virt_mem *mem)
1580 return I40E_ERR_PARAM;
1585 return I40E_SUCCESS;
1589 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1591 rte_spinlock_init(&sp->spinlock);
1595 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1597 rte_spinlock_lock(&sp->spinlock);
1601 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1603 rte_spinlock_unlock(&sp->spinlock);
1607 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1613 * Get the hardware capabilities, which will be parsed
1614 * and saved into struct i40e_hw.
1617 i40e_get_cap(struct i40e_hw *hw)
1619 struct i40e_aqc_list_capabilities_element_resp *buf;
1620 uint16_t len, size = 0;
1623 /* Calculate a buffer large enough to hold the response data temporarily */
1624 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1625 I40E_MAX_CAP_ELE_NUM;
1626 buf = rte_zmalloc("i40e", len, 0);
1628 PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
1629 return I40E_ERR_NO_MEMORY;
1632 /* Get and parse the capabilities, then save them to hw */
1633 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1634 i40e_aqc_opc_list_func_capabilities, NULL);
1635 if (ret != I40E_SUCCESS)
1636 PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");
1638 /* Free the temporary buffer after being used */
1645 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1647 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1648 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1649 uint16_t sum_queues = 0, sum_vsis;
1651 /* First check whether the FW supports SRIOV */
1652 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1653 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");
1657 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
1658 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
1659 PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
1660 /* Allocate queues for pf */
1661 if (hw->func_caps.rss) {
1662 pf->flags |= I40E_FLAG_RSS;
1663 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
1664 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
1665 pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
1668 sum_queues = pf->lan_nb_qps;
1669 /* Default VSI is not counted in */
1671 PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);
1673 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
1674 pf->flags |= I40E_FLAG_SRIOV;
1675 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
1676 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
1677 PMD_INIT_LOG(ERR, "Config VF number %u, "
1678 "max supported %u.\n", dev->pci_dev->max_vfs,
1679 hw->func_caps.num_vfs);
1682 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
1683 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
1684 "max support %u queues.\n", pf->vf_nb_qps,
1685 I40E_MAX_QP_NUM_PER_VF);
1688 pf->vf_num = dev->pci_dev->max_vfs;
1689 sum_queues += pf->vf_nb_qps * pf->vf_num;
1690 sum_vsis += pf->vf_num;
1691 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
1692 pf->vf_num, pf->vf_nb_qps);
1696 if (hw->func_caps.vmdq) {
1697 pf->flags |= I40E_FLAG_VMDQ;
1698 pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
1699 sum_queues += pf->vmdq_nb_qps;
1701 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);
1704 if (hw->func_caps.fd) {
1705 pf->flags |= I40E_FLAG_FDIR;
1706 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
1708 * Each flow director consumes one VSI and one queue,
1709 * but the exact number cannot be predicted here.
1713 if (sum_vsis > pf->max_num_vsi ||
1714 sum_queues > hw->func_caps.num_rx_qp) {
1715 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
1716 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
1717 pf->max_num_vsi, sum_vsis);
1718 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
1719 hw->func_caps.num_rx_qp, sum_queues);
1723 /* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for the misc intr cause */
1724 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
1725 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
1726 sum_vsis, hw->func_caps.num_msix_vectors);
1729 return I40E_SUCCESS;
1733 i40e_pf_get_switch_config(struct i40e_pf *pf)
1735 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1736 struct i40e_aqc_get_switch_config_resp *switch_config;
1737 struct i40e_aqc_switch_config_element_resp *element;
1738 uint16_t start_seid = 0, num_reported;
1741 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
1742 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
1743 if (!switch_config) {
1744 PMD_DRV_LOG(ERR, "Failed to allocated memory\n");
1748 /* Get the switch configurations */
1749 ret = i40e_aq_get_switch_config(hw, switch_config,
1750 I40E_AQ_LARGE_BUF, &start_seid, NULL);
1751 if (ret != I40E_SUCCESS) {
1752 PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
1755 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
1756 if (num_reported != 1) { /* The number should be 1 */
1757 PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");
1761 /* Parse the switch configuration elements */
1762 element = &(switch_config->element[0]);
1763 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
1764 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
1765 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
1767 PMD_DRV_LOG(INFO, "Unknown element type\n");
1770 rte_free(switch_config);
1776 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
1779 struct pool_entry *entry;
1781 if (pool == NULL || num == 0)
1784 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
1785 if (entry == NULL) {
1786 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1791 /* Initialize the queue heap */
1792 pool->num_free = num;
1793 pool->num_alloc = 0;
1795 LIST_INIT(&pool->alloc_list);
1796 LIST_INIT(&pool->free_list);
1798 /* Initialize element */
1802 LIST_INSERT_HEAD(&pool->free_list, entry, next);
1807 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
1809 struct pool_entry *entry;
1814 LIST_FOREACH(entry, &pool->alloc_list, next) {
1815 LIST_REMOVE(entry, next);
1819 LIST_FOREACH(entry, &pool->free_list, next) {
1820 LIST_REMOVE(entry, next);
1825 pool->num_alloc = 0;
1827 LIST_INIT(&pool->alloc_list);
1828 LIST_INIT(&pool->free_list);
1832 i40e_res_pool_free(struct i40e_res_pool_info *pool,
1835 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
1836 uint32_t pool_offset;
1840 PMD_DRV_LOG(ERR, "Invalid parameter\n");
1844 pool_offset = base - pool->base;
1845 /* Lookup in alloc list */
1846 LIST_FOREACH(entry, &pool->alloc_list, next) {
1847 if (entry->base == pool_offset) {
1848 valid_entry = entry;
1849 LIST_REMOVE(entry, next);
1854 /* Not found, return */
1855 if (valid_entry == NULL) {
1856 PMD_DRV_LOG(ERR, "Failed to find entry\n");
1861 * Found it, move it to free list and try to merge.
1862 * In order to make merge easier, always sort it by qbase.
1863 * Find the adjacent prev and next entries.
1866 LIST_FOREACH(entry, &pool->free_list, next) {
1867 if (entry->base > valid_entry->base) {
1875 /* Try to merge with next one */
1877 /* Merge with next one */
1878 if (valid_entry->base + valid_entry->len == next->base) {
1879 next->base = valid_entry->base;
1880 next->len += valid_entry->len;
1881 rte_free(valid_entry);
1888 /* Merge with previous one */
1889 if (prev->base + prev->len == valid_entry->base) {
1890 prev->len += valid_entry->len;
1891 /* If it merged with the next one, remove the next node */
1893 LIST_REMOVE(valid_entry, next);
1894 rte_free(valid_entry);
1896 rte_free(valid_entry);
1902 /* No entry found to merge with, insert */
1905 LIST_INSERT_AFTER(prev, valid_entry, next);
1906 else if (next != NULL)
1907 LIST_INSERT_BEFORE(next, valid_entry, next);
1908 else /* The list is empty, insert at the head */
1909 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
1912 pool->num_free += valid_entry->len;
1913 pool->num_alloc -= valid_entry->len;
1919 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
1922 struct pool_entry *entry, *valid_entry;
1924 if (pool == NULL || num == 0) {
1925 PMD_DRV_LOG(ERR, "Invalid parameter\n");
1929 if (pool->num_free < num) {
1930 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
1931 num, pool->num_free);
1936 /* Look up the free list and find the best-fit entry */
1937 LIST_FOREACH(entry, &pool->free_list, next) {
1938 if (entry->len >= num) {
1940 if (entry->len == num) {
1941 valid_entry = entry;
1944 if (valid_entry == NULL || valid_entry->len > entry->len)
1945 valid_entry = entry;
1949 /* No entry found to satisfy the request, return */
1950 if (valid_entry == NULL) {
1951 PMD_DRV_LOG(ERR, "No valid entry found\n");
1955 * The entry has exactly the number of queues requested,
1956 * remove it from the free list.
1958 if (valid_entry->len == num) {
1959 LIST_REMOVE(valid_entry, next);
1962 * The entry has more queues than requested,
1963 * create a new entry for alloc_list and adjust the
1964 * queue base and count of the entry left in free_list.
1966 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
1967 if (entry == NULL) {
1968 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1972 entry->base = valid_entry->base;
1974 valid_entry->base += num;
1975 valid_entry->len -= num;
1976 valid_entry = entry;
1979 /* Insert it into alloc list, not sorted */
1980 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
1982 pool->num_free -= valid_entry->len;
1983 pool->num_alloc += valid_entry->len;
1985 return (valid_entry->base + pool->base);
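/*
 * Usage sketch, assuming a caller that reserves a block of queue pairs
 * from pf->qp_pool and releases it later. i40e_res_pool_alloc() returns
 * an absolute base (pool->base included), which is exactly the value
 * i40e_res_pool_free() expects back:
 *
 *	int base = i40e_res_pool_alloc(&pf->qp_pool, nb_qps);
 *	if (base < 0)
 *		return I40E_ERR_NO_MEMORY;
 *	... use queues [base, base + nb_qps) ...
 *	i40e_res_pool_free(&pf->qp_pool, base);
 */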
1989 * bitmap_is_subset - Check whether src2 is a subset of src1
1992 bitmap_is_subset(uint8_t src1, uint8_t src2)
1994 return !((src1 ^ src2) & src2);
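/*
 * Worked example, added for clarity: with src1 = 0x07 and src2 = 0x03,
 * (src1 ^ src2) = 0x04 and (0x04 & 0x03) = 0, so src2 is reported as a
 * subset of src1. With src2 = 0x08 the expression is non-zero and the
 * check fails, since bit 3 is not present in src1.
 */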
1998 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2000 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2002 /* If DCB is not supported, only default TC is supported */
2003 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2004 PMD_DRV_LOG(ERR, "DCB is not enabled, "
2005 "only TC0 is supported\n");
2009 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2010 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2011 "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
2015 return I40E_SUCCESS;
2019 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2020 struct i40e_vsi_vlan_pvid_info *info)
2023 struct i40e_vsi_context ctxt;
2024 uint8_t vlan_flags = 0;
2027 if (vsi == NULL || info == NULL) {
2028 PMD_DRV_LOG(ERR, "invalid parameters\n");
2029 return I40E_ERR_PARAM;
2033 vsi->info.pvid = info->config.pvid;
2035 * If PVID insertion is enabled, only tagged packets are
2036 * allowed to be sent out.
2038 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2039 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2042 if (info->config.reject.tagged == 0)
2043 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2045 if (info->config.reject.untagged == 0)
2046 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2048 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2049 I40E_AQ_VSI_PVLAN_MODE_MASK);
2050 vsi->info.port_vlan_flags |= vlan_flags;
2051 vsi->info.valid_sections =
2052 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2053 memset(&ctxt, 0, sizeof(ctxt));
2054 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2055 ctxt.seid = vsi->seid;
2057 hw = I40E_VSI_TO_HW(vsi);
2058 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2059 if (ret != I40E_SUCCESS)
2060 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2066 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2068 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2070 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2072 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2073 if (ret != I40E_SUCCESS)
2077 PMD_DRV_LOG(ERR, "seid not valid\n");
2081 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2082 tc_bw_data.tc_valid_bits = enabled_tcmap;
2083 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2084 tc_bw_data.tc_bw_credits[i] =
2085 (enabled_tcmap & (1 << i)) ? 1 : 0;
2087 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2088 if (ret != I40E_SUCCESS) {
2089 PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
2093 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2094 sizeof(vsi->info.qs_handle));
2095 return I40E_SUCCESS;
2099 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2100 struct i40e_aqc_vsi_properties_data *info,
2101 uint8_t enabled_tcmap)
2103 int ret, total_tc = 0, i;
2104 uint16_t qpnum_per_tc, bsf, qp_idx;
2106 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2107 if (ret != I40E_SUCCESS)
2110 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2111 if (enabled_tcmap & (1 << i))
2113 vsi->enabled_tc = enabled_tcmap;
2115 /* Number of queues per enabled TC */
2116 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2117 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2118 bsf = rte_bsf32(qpnum_per_tc);
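/*
 * qpnum_per_tc was rounded down to a power of two above, so rte_bsf32()
 * yields log2 of the queue count, which is the encoding used by the TC
 * queue-number mapping field (e.g. 16 queues per TC -> bsf = 4).
 */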
2120 /* Adjust the queue count to the number of queues that can actually be used */
2121 vsi->nb_qps = qpnum_per_tc * total_tc;
2124 * Configure TC and queue mapping parameters: for each enabled TC,
2125 * allocate qpnum_per_tc queues to it. Disabled TCs are served by
2126 * the default queue.
2129 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2130 if (vsi->enabled_tc & (1 << i)) {
2131 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2132 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2133 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2134 qp_idx += qpnum_per_tc;
2136 info->tc_mapping[i] = 0;
2139 /* Associate queue number with VSI */
2140 if (vsi->type == I40E_VSI_SRIOV) {
2141 info->mapping_flags |=
2142 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2143 for (i = 0; i < vsi->nb_qps; i++)
2144 info->queue_mapping[i] =
2145 rte_cpu_to_le_16(vsi->base_queue + i);
2147 info->mapping_flags |=
2148 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2149 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2151 info->valid_sections =
2152 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2154 return I40E_SUCCESS;
2158 i40e_veb_release(struct i40e_veb *veb)
2160 struct i40e_vsi *vsi;
2163 if (veb == NULL || veb->associate_vsi == NULL)
2166 if (!TAILQ_EMPTY(&veb->head)) {
2167 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
2171 vsi = veb->associate_vsi;
2172 hw = I40E_VSI_TO_HW(vsi);
2174 vsi->uplink_seid = veb->uplink_seid;
2175 i40e_aq_delete_element(hw, veb->seid, NULL);
2178 return I40E_SUCCESS;
2182 static struct i40e_veb *
2183 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2185 struct i40e_veb *veb;
2189 if (NULL == pf || vsi == NULL) {
2190 PMD_DRV_LOG(ERR, "veb setup failed, "
2191 "associated VSI shouldn't null\n");
2194 hw = I40E_PF_TO_HW(pf);
2196 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2198 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
2202 veb->associate_vsi = vsi;
2203 TAILQ_INIT(&veb->head);
2204 veb->uplink_seid = vsi->uplink_seid;
2206 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2207 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2209 if (ret != I40E_SUCCESS) {
2210 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
2211 hw->aq.asq_last_status);
2215 /* get statistics index */
2216 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2217 &veb->stats_idx, NULL, NULL, NULL);
2218 if (ret != I40E_SUCCESS) {
2219 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
2220 hw->aq.asq_last_status);
2224 /* Get VEB bandwidth, to be implemented */
2225 /* The associated VSI now binds to this VEB; set its uplink to the VEB */
2226 vsi->uplink_seid = veb->seid;
2235 i40e_vsi_release(struct i40e_vsi *vsi)
2239 struct i40e_vsi_list *vsi_list;
2241 struct i40e_mac_filter *f;
2244 return I40E_SUCCESS;
2246 pf = I40E_VSI_TO_PF(vsi);
2247 hw = I40E_VSI_TO_HW(vsi);
2249 /* If the VSI has children attached, release the children first */
2251 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2252 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2254 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2256 i40e_veb_release(vsi->veb);
2259 /* Remove all macvlan filters of the VSI */
2260 i40e_vsi_remove_all_macvlan_filter(vsi);
2261 TAILQ_FOREACH(f, &vsi->mac_list, next)
2264 if (vsi->type != I40E_VSI_MAIN) {
2265 /* Remove vsi from parent's sibling list */
2266 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2267 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
2268 return I40E_ERR_PARAM;
2270 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2271 &vsi->sib_vsi_list, list);
2273 /* Remove all switch elements of the VSI */
2274 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2275 if (ret != I40E_SUCCESS)
2276 PMD_DRV_LOG(ERR, "Failed to delete element\n");
2278 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2280 if (vsi->type != I40E_VSI_SRIOV)
2281 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2284 return I40E_SUCCESS;
2288 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2290 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2291 struct i40e_aqc_remove_macvlan_element_data def_filter;
2294 if (vsi->type != I40E_VSI_MAIN)
2295 return I40E_ERR_CONFIG;
2296 memset(&def_filter, 0, sizeof(def_filter));
2297 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2299 def_filter.vlan_tag = 0;
2300 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2301 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2302 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2303 if (ret != I40E_SUCCESS) {
2304 struct i40e_mac_filter *f;
2305 PMD_DRV_LOG(WARNING, "Failed to remove default [mac,vlan] config\n");
2307 /* Even if updating the default setting failed, the permanent
2308 * MAC still needs to be added to the MAC list.
2310 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2312 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
2313 return I40E_ERR_NO_MEMORY;
2315 (void)rte_memcpy(&f->macaddr.addr_bytes, hw->mac.perm_addr,
2317 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2322 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2326 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2328 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2329 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2330 struct i40e_hw *hw = &vsi->adapter->hw;
2334 memset(&bw_config, 0, sizeof(bw_config));
2335 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2336 if (ret != I40E_SUCCESS) {
2337 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
2338 "configuration %u\n", hw->aq.asq_last_status);
2342 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2343 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2344 &ets_sla_config, NULL);
2345 if (ret != I40E_SUCCESS) {
2346 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2347 "configuration %u\n", hw->aq.asq_last_status);
2351 /* Don't store the info yet, just print it out */
2352 PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
2353 PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
2354 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2355 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
2356 ets_sla_config.share_credits[i]);
2357 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
2358 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2359 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u\n", i,
2360 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2369 i40e_vsi_setup(struct i40e_pf *pf,
2370 enum i40e_vsi_type type,
2371 struct i40e_vsi *uplink_vsi,
2372 uint16_t user_param)
2374 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2375 struct i40e_vsi *vsi;
2377 struct i40e_vsi_context ctxt;
2378 struct ether_addr broadcast =
2379 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2381 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2382 PMD_DRV_LOG(ERR, "VSI setup failed, "
2383 "VSI link shouldn't be NULL\n");
2387 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2388 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2389 "uplink VSI should be NULL\n");
2393 /* If the uplink VSI hasn't set up a VEB yet, create one first */
2394 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2395 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2397 if (NULL == uplink_vsi->veb) {
2398 PMD_DRV_LOG(ERR, "VEB setup failed\n");
2403 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2405 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
2408 TAILQ_INIT(&vsi->mac_list);
2410 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2411 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2412 vsi->parent_vsi = uplink_vsi;
2413 vsi->user_param = user_param;
2414 /* Allocate queues */
2415 switch (vsi->type) {
2416 case I40E_VSI_MAIN :
2417 vsi->nb_qps = pf->lan_nb_qps;
2419 case I40E_VSI_SRIOV :
2420 vsi->nb_qps = pf->vf_nb_qps;
2425 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2427 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2431 vsi->base_queue = ret;
2433 /* A VF has its MSI-X interrupts in the VF range; don't allocate one here */
2434 if (type != I40E_VSI_SRIOV) {
2435 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2437 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2438 goto fail_queue_alloc;
2440 vsi->msix_intr = ret;
2444 if (type == I40E_VSI_MAIN) {
2445 /* For main VSI, no need to add since it's default one */
2446 vsi->uplink_seid = pf->mac_seid;
2447 vsi->seid = pf->main_vsi_seid;
2448 /* Bind queues with specific MSIX interrupt */
2450 * At least 2 interrupts are needed: one for misc causes, which is
2451 * enabled from the OS side, and another for queue binding, which is
2452 * enabled from the device side only.
2455 /* Get default VSI parameters from hardware */
2456 memset(&ctxt, 0, sizeof(ctxt));
2457 ctxt.seid = vsi->seid;
2458 ctxt.pf_num = hw->pf_id;
2459 ctxt.uplink_seid = vsi->uplink_seid;
2461 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2462 if (ret != I40E_SUCCESS) {
2463 PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
2464 goto fail_msix_alloc;
2466 (void)rte_memcpy(&vsi->info, &ctxt.info,
2467 sizeof(struct i40e_aqc_vsi_properties_data));
2468 vsi->vsi_id = ctxt.vsi_number;
2469 vsi->info.valid_sections = 0;
2471 /* Configure TC; only TC0 is enabled */
2472 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2474 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
2475 goto fail_msix_alloc;
2478 /* TC, queue mapping */
2479 memset(&ctxt, 0, sizeof(ctxt));
2480 vsi->info.valid_sections |=
2481 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2482 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2483 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2484 (void)rte_memcpy(&ctxt.info, &vsi->info,
2485 sizeof(struct i40e_aqc_vsi_properties_data));
2486 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2487 I40E_DEFAULT_TCMAP);
2488 if (ret != I40E_SUCCESS) {
2489 PMD_DRV_LOG(ERR, "Failed to configure "
2490 "TC queue mapping\n");
2491 goto fail_msix_alloc;
2493 ctxt.seid = vsi->seid;
2494 ctxt.pf_num = hw->pf_id;
2495 ctxt.uplink_seid = vsi->uplink_seid;
2498 /* Update VSI parameters */
2499 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2500 if (ret != I40E_SUCCESS) {
2501 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2502 goto fail_msix_alloc;
2505 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2506 sizeof(vsi->info.tc_mapping));
2507 (void)rte_memcpy(&vsi->info.queue_mapping,
2508 &ctxt.info.queue_mapping,
2509 sizeof(vsi->info.queue_mapping));
2510 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2511 vsi->info.valid_sections = 0;
2513 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2515 ret = i40e_update_default_filter_setting(vsi);
2516 if (ret != I40E_SUCCESS) {
2517 PMD_DRV_LOG(ERR, "Failed to remove default "
2518 "filter setting\n");
2519 goto fail_msix_alloc;
2522 else if (type == I40E_VSI_SRIOV) {
2523 memset(&ctxt, 0, sizeof(ctxt));
2525 * For other VSIs, the uplink_seid equals the uplink VSI's
2526 * uplink_seid since they share the same VEB
2528 vsi->uplink_seid = uplink_vsi->uplink_seid;
2529 ctxt.pf_num = hw->pf_id;
2530 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2531 ctxt.uplink_seid = vsi->uplink_seid;
2532 ctxt.connection_type = 0x1;
2533 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2535 /* Configure switch ID */
2536 ctxt.info.valid_sections |=
2537 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2538 ctxt.info.switch_id =
2539 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2540 /* Configure port/vlan */
2541 ctxt.info.valid_sections |=
2542 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2543 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2544 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2545 I40E_DEFAULT_TCMAP);
2546 if (ret != I40E_SUCCESS) {
2547 PMD_DRV_LOG(ERR, "Failed to configure "
2548 "TC queue mapping\n");
2549 goto fail_msix_alloc;
2551 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2552 ctxt.info.valid_sections |=
2553 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2555 * Since the VSI is not created yet, only configure the parameters;
2556 * the VSI will be added below.
2560 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet\n");
2561 goto fail_msix_alloc;
2564 if (vsi->type != I40E_VSI_MAIN) {
2565 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2567 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
2568 hw->aq.asq_last_status);
2569 goto fail_msix_alloc;
2571 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2572 vsi->info.valid_sections = 0;
2573 vsi->seid = ctxt.seid;
2574 vsi->vsi_id = ctxt.vsi_number;
2575 vsi->sib_vsi_list.vsi = vsi;
2576 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2577 &vsi->sib_vsi_list, list);
2580 /* MAC/VLAN configuration */
2581 ret = i40e_vsi_add_mac(vsi, &broadcast);
2582 if (ret != I40E_SUCCESS) {
2583 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
2584 goto fail_msix_alloc;
2587 /* Get VSI BW information */
2588 i40e_vsi_dump_bw_config(vsi);
2591 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2593 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2599 /* Configure vlan stripping on or off */
2601 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2603 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2604 struct i40e_vsi_context ctxt;
2606 int ret = I40E_SUCCESS;
2608 /* Check whether it is already on or off */
2609 if (vsi->info.valid_sections &
2610 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2612 if ((vsi->info.port_vlan_flags &
2613 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2614 return 0; /* already on */
2616 if ((vsi->info.port_vlan_flags &
2617 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2618 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2619 return 0; /* already off */
2624 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2626 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2627 vsi->info.valid_sections =
2628 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2629 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2630 vsi->info.port_vlan_flags |= vlan_flags;
2631 ctxt.seid = vsi->seid;
2632 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2633 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2635 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
2636 on ? "enable" : "disable");
2642 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2644 struct rte_eth_dev_data *data = dev->data;
2647 /* Apply vlan offload setting */
2648 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2650 /* Apply double-vlan setting, not implemented yet */
2652 /* Apply pvid setting */
2653 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2654 data->dev_conf.txmode.hw_vlan_insert_pvid);
2656 PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
2662 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2664 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2666 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2670 i40e_update_flow_control(struct i40e_hw *hw)
2672 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2673 struct i40e_link_status link_status;
2674 uint32_t rxfc = 0, txfc = 0, reg;
2678 memset(&link_status, 0, sizeof(link_status));
2679 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2680 if (ret != I40E_SUCCESS) {
2681 PMD_DRV_LOG(ERR, "Failed to get link status information\n");
2682 goto write_reg; /* Disable flow control */
2685 an_info = hw->phy.link_info.an_info;
2686 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2687 PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
2688 ret = I40E_ERR_NOT_READY;
2689 goto write_reg; /* Disable flow control */
2692 * If link auto negotiation is enabled, flow control needs to
2693 * be configured according to it
2695 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2696 case I40E_LINK_PAUSE_RXTX:
2699 hw->fc.current_mode = I40E_FC_FULL;
2701 case I40E_AQ_LINK_PAUSE_RX:
2703 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2705 case I40E_AQ_LINK_PAUSE_TX:
2707 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2710 hw->fc.current_mode = I40E_FC_NONE;
2715 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2716 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2717 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2718 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2719 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2720 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2727 i40e_pf_setup(struct i40e_pf *pf)
2729 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2730 struct i40e_filter_control_settings settings;
2731 struct rte_eth_dev_data *dev_data = pf->dev_data;
2732 struct i40e_vsi *vsi;
2735 /* Clear all stats counters */
2736 pf->offset_loaded = FALSE;
2737 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2738 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2740 ret = i40e_pf_get_switch_config(pf);
2741 if (ret != I40E_SUCCESS) {
2742 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2747 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2749 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2750 return I40E_ERR_NOT_READY;
2753 dev_data->nb_rx_queues = vsi->nb_qps;
2754 dev_data->nb_tx_queues = vsi->nb_qps;
2756 /* Configure filter control */
2757 memset(&settings, 0, sizeof(settings));
2758 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2759 /* Enable ethtype and macvlan filters */
2760 settings.enable_ethtype = TRUE;
2761 settings.enable_macvlan = TRUE;
2762 ret = i40e_set_filter_control(hw, &settings);
2764 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2767 /* Update flow control according to the auto negotiation */
2768 i40e_update_flow_control(hw);
2770 return I40E_SUCCESS;
2774 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2779 /* Wait until the request is finished */
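/*
 * i.e. poll until the QENA_REQ and QENA_STAT bits agree, which means no
 * previous enable/disable transition is still in progress.
 */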
2780 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2781 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2782 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2783 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2784 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2790 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2791 return I40E_SUCCESS; /* already on, skip next steps */
2792 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2794 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2795 return I40E_SUCCESS; /* already off, skip next steps */
2796 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2798 /* Write the register */
2799 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2800 /* Check the result */
2801 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2802 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2803 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2805 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2806 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2809 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2810 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2814 /* Check whether it timed out */
2815 if (j >= I40E_CHK_Q_ENA_COUNT) {
2816 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
2817 (on ? "enable" : "disable"), q_idx);
2818 return I40E_ERR_TIMEOUT;
2820 return I40E_SUCCESS;
2822 /* Switch on or off the TX queues */
2824 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
2826 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2827 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2828 struct i40e_tx_queue *txq;
2832 pf_q = vsi->base_queue;
2833 for (i = 0; i < dev_data->nb_tx_queues; i++, pf_q++) {
2834 txq = dev_data->tx_queues[i];
2836 continue; /* Queue not configured */
2837 ret = i40e_switch_tx_queue(hw, pf_q, on);
2838 if (ret != I40E_SUCCESS)
2842 return I40E_SUCCESS;
2846 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2851 /* Wait until the request is finished */
2852 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2853 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2854 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2855 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2856 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
2861 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2862 return I40E_SUCCESS; /* Already on, skip next steps */
2863 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2865 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2866 return I40E_SUCCESS; /* Already off, skip next steps */
2867 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2870 /* Write the register */
2871 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
2872 /* Check the result */
2873 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2874 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2875 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2877 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2878 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
2881 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2882 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2887 /* Check whether it timed out */
2888 if (j >= I40E_CHK_Q_ENA_COUNT) {
2889 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
2890 (on ? "enable" : "disable"), q_idx);
2891 return I40E_ERR_TIMEOUT;
2894 return I40E_SUCCESS;
2896 /* Switch on or off the rx queues */
2898 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
2900 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2901 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2902 struct i40e_rx_queue *rxq;
2906 pf_q = vsi->base_queue;
2907 for (i = 0; i < dev_data->nb_rx_queues; i++, pf_q++) {
2908 rxq = dev_data->rx_queues[i];
2910 continue; /* Queue not configured */
2911 ret = i40e_switch_rx_queue(hw, pf_q, on);
2912 if (ret != I40E_SUCCESS)
2916 return I40E_SUCCESS;
2919 /* Switch on or off all the rx/tx queues */
2921 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
2926 /* enable rx queues before enabling tx queues */
2927 ret = i40e_vsi_switch_rx_queues(vsi, on);
2929 PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
2932 ret = i40e_vsi_switch_tx_queues(vsi, on);
2934 /* Stop tx queues before stopping rx queues */
2935 ret = i40e_vsi_switch_tx_queues(vsi, on);
2937 PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
2940 ret = i40e_vsi_switch_rx_queues(vsi, on);
2946 /* Initialize VSI for TX */
2948 i40e_vsi_tx_init(struct i40e_vsi *vsi)
2950 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2951 struct rte_eth_dev_data *data = pf->dev_data;
2953 uint32_t ret = I40E_SUCCESS;
2955 for (i = 0; i < data->nb_tx_queues; i++) {
2956 ret = i40e_tx_queue_init(data->tx_queues[i]);
2957 if (ret != I40E_SUCCESS)
2964 /* Initialize VSI for RX */
2966 i40e_vsi_rx_init(struct i40e_vsi *vsi)
2968 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2969 struct rte_eth_dev_data *data = pf->dev_data;
2970 int ret = I40E_SUCCESS;
2973 i40e_pf_config_mq_rx(pf);
2974 for (i = 0; i < data->nb_rx_queues; i++) {
2975 ret = i40e_rx_queue_init(data->rx_queues[i]);
2976 if (ret != I40E_SUCCESS) {
2977 PMD_DRV_LOG(ERR, "Failed to do RX queue "
2978 "initialization\n");
2986 /* Initialize VSI */
2988 i40e_vsi_init(struct i40e_vsi *vsi)
2992 err = i40e_vsi_tx_init(vsi);
2994 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
2997 err = i40e_vsi_rx_init(vsi);
2999 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
3007 i40e_stat_update_32(struct i40e_hw *hw,
3015 new_data = (uint64_t)I40E_READ_REG(hw, reg);
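/*
 * The HW counter is only 32 bits wide and can wrap; if the current
 * reading is below the stored offset, add 2^32 before subtracting.
 */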
3019 if (new_data >= *offset)
3020 *stat = (uint64_t)(new_data - *offset);
3022 *stat = (uint64_t)((new_data +
3023 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3027 i40e_stat_update_48(struct i40e_hw *hw,
3036 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3037 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3038 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
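/*
 * The 48-bit counter is composed of a 32-bit low register plus the low
 * 16 bits of a high register. Wrap-around is handled like the 32-bit
 * case, but modulo 2^48.
 */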
3043 if (new_data >= *offset)
3044 *stat = new_data - *offset;
3046 *stat = (uint64_t)((new_data +
3047 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3049 *stat &= I40E_48_BIT_MASK;
3054 i40e_pf_disable_irq0(struct i40e_hw *hw)
3056 /* Disable all interrupt types */
3057 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3058 I40E_WRITE_FLUSH(hw);
3063 i40e_pf_enable_irq0(struct i40e_hw *hw)
3065 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3066 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3067 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3068 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3069 I40E_WRITE_FLUSH(hw);
3073 i40e_pf_config_irq0(struct i40e_hw *hw)
3077 /* read pending request and disable first */
3078 i40e_pf_disable_irq0(hw);
3080 * Enable all interrupt error options to detect possible errors,
3081 * other informational interrupts are ignored
3083 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3084 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3085 I40E_PFINT_ICR0_ENA_GRST_MASK |
3086 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3087 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3088 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3089 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3090 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3092 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3093 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3094 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3096 /* Don't link any queues with irq0 */
3097 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3098 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3102 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3104 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3105 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3108 uint32_t index, offset, val;
3113 * Try to find which VF triggered a reset; use the absolute VF id to
3114 * access the register since it is a global register.
3116 for (i = 0; i < pf->vf_num; i++) {
3117 abs_vf_id = hw->func_caps.vf_base_id + i;
3118 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3119 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3120 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3121 /* VFR event occurred */
3122 if (val & (0x1 << offset)) {
3125 /* Clear the event first */
3126 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3128 PMD_DRV_LOG(INFO, "VF %u reset occured\n", abs_vf_id);
3130 * Only notify that a VF reset event occurred,
3131 * don't trigger another SW reset
3133 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3134 if (ret != I40E_SUCCESS)
3135 PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
3141 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3143 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3144 struct i40e_arq_event_info info;
3145 uint16_t pending, opcode;
3148 info.msg_size = I40E_AQ_BUF_SZ;
3149 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3150 if (!info.msg_buf) {
3151 PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
3157 ret = i40e_clean_arq_element(hw, &info, &pending);
3159 if (ret != I40E_SUCCESS) {
3160 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3161 "aq_err: %u\n", hw->aq.asq_last_status);
3164 opcode = rte_le_to_cpu_16(info.desc.opcode);
3167 case i40e_aqc_opc_send_msg_to_pf:
3168 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3169 i40e_pf_host_handle_vf_msg(dev,
3170 rte_le_to_cpu_16(info.desc.retval),
3171 rte_le_to_cpu_32(info.desc.cookie_high),
3172 rte_le_to_cpu_32(info.desc.cookie_low),
3177 PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
3181 /* Reset the buffer after processing one */
3182 info.msg_size = I40E_AQ_BUF_SZ;
3184 rte_free(info.msg_buf);
3188 * Interrupt handler triggered by NIC for handling
3189 * specific interrupt.
3192 * Pointer to interrupt handle.
3194 * The address of parameter (struct rte_eth_dev *) registered before.
3200 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3203 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3204 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3205 uint32_t cause, enable;
3207 i40e_pf_disable_irq0(hw);
3209 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3210 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3212 /* Shared IRQ case, return */
3213 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3214 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3215 "no INT event to process\n", hw->pf_id);
3219 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3220 PMD_DRV_LOG(INFO, "INT:Link status changed\n");
3221 i40e_dev_link_update(dev, 0);
3224 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3225 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
3227 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3228 PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
3230 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3231 PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
3233 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3234 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured\n");
3236 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3237 PMD_DRV_LOG(INFO, "INT:HMC error occured\n");
3239 /* Add processing func to deal with VF reset event */
3240 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3241 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3242 i40e_dev_handle_vfr_event(dev);
3244 /* Find admin queue event */
3245 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3246 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3247 i40e_dev_handle_aq_msg(dev);
3251 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3252 /* Re-enable interrupt from device side */
3253 i40e_pf_enable_irq0(hw);
3254 /* Re-enable interrupt from host side */
3255 rte_intr_enable(&(dev->pci_dev->intr_handle));
3259 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3260 struct i40e_macvlan_filter *filter,
3263 int ele_num, ele_buff_size;
3264 int num, actual_num, i;
3265 int ret = I40E_SUCCESS;
3266 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3267 struct i40e_aqc_add_macvlan_element_data *req_list;
3269 if (filter == NULL || total == 0)
3270 return I40E_ERR_PARAM;
3271 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3272 ele_buff_size = hw->aq.asq_buf_size;
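/*
 * The admin queue buffer can hold at most ele_num elements per command,
 * so the filter list is sent in chunks of at most ele_num entries.
 */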
3274 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3275 if (req_list == NULL) {
3276 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3277 return I40E_ERR_NO_MEMORY;
3282 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3283 memset(req_list, 0, ele_buff_size);
3285 for (i = 0; i < actual_num; i++) {
3286 (void)rte_memcpy(req_list[i].mac_addr,
3287 &filter[num + i].macaddr, ETH_ADDR_LEN);
3288 req_list[i].vlan_tag =
3289 rte_cpu_to_le_16(filter[num + i].vlan_id);
3290 req_list[i].flags = rte_cpu_to_le_16(\
3291 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3292 req_list[i].queue_number = 0;
3295 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3297 if (ret != I40E_SUCCESS) {
3298 PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
3302 } while (num < total);
3310 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3311 struct i40e_macvlan_filter *filter,
3314 int ele_num, ele_buff_size;
3315 int num, actual_num, i;
3316 int ret = I40E_SUCCESS;
3317 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3318 struct i40e_aqc_remove_macvlan_element_data *req_list;
3320 if (filter == NULL || total == 0)
3321 return I40E_ERR_PARAM;
3323 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3324 ele_buff_size = hw->aq.asq_buf_size;
3326 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3327 if (req_list == NULL) {
3328 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3329 return I40E_ERR_NO_MEMORY;
3334 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3335 memset(req_list, 0, ele_buff_size);
3337 for (i = 0; i < actual_num; i++) {
3338 (void)rte_memcpy(req_list[i].mac_addr,
3339 &filter[num + i].macaddr, ETH_ADDR_LEN);
3340 req_list[i].vlan_tag =
3341 rte_cpu_to_le_16(filter[num + i].vlan_id);
3342 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3345 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3347 if (ret != I40E_SUCCESS) {
3348 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
3352 } while (num < total);
3359 /* Find a specific MAC filter */
3360 static struct i40e_mac_filter *
3361 i40e_find_mac_filter(struct i40e_vsi *vsi,
3362 struct ether_addr *macaddr)
3364 struct i40e_mac_filter *f;
3366 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3367 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3375 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3378 uint32_t vid_idx, vid_bit;
3380 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3381 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
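/*
 * e.g. vlan_id 100: vid_idx = 100 >> 5 = 3, vid_bit = 1 << (100 & 0x1F)
 * = 1 << 4, so bit 4 of vfta[3] represents VLAN 100.
 */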
3383 if (vsi->vfta[vid_idx] & vid_bit)
3390 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3391 uint16_t vlan_id, bool on)
3393 uint32_t vid_idx, vid_bit;
3395 #define UINT32_BIT_MASK 0x1F
3396 #define VALID_VLAN_BIT_MASK 0xFFF
3397 /* VFTA is an array of 32-bit elements, each holding 32 VLAN bits. Find the
3398 * element first, then find the bit within it
3400 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3402 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3405 vsi->vfta[vid_idx] |= vid_bit;
3407 vsi->vfta[vid_idx] &= ~vid_bit;
3411 * Find all VLANs configured for a specific MAC address,
3412 * filling mv_f with the [MAC, VLAN] pairs found.
3415 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3416 struct i40e_macvlan_filter *mv_f,
3417 int num, struct ether_addr *addr)
3423 * i40e_find_vlan_filter is deliberately not used here, to reduce
3424 * the loop time, even though the code looks more complex.
3426 if (num < vsi->vlan_num)
3427 return I40E_ERR_PARAM;
3430 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3432 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3433 if (vsi->vfta[j] & (1 << k)) {
3435 PMD_DRV_LOG(ERR, "vlan number "
3437 return I40E_ERR_PARAM;
3439 (void)rte_memcpy(&mv_f[i].macaddr,
3440 addr, ETH_ADDR_LEN);
3442 j * I40E_UINT32_BIT_SIZE + k;
3448 return I40E_SUCCESS;
3452 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3453 struct i40e_macvlan_filter *mv_f,
3458 struct i40e_mac_filter *f;
3460 if (num < vsi->mac_num)
3461 return I40E_ERR_PARAM;
3463 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3465 PMD_DRV_LOG(ERR, "buffer number not match\n");
3466 return I40E_ERR_PARAM;
3468 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3469 mv_f[i].vlan_id = vlan;
3473 return I40E_SUCCESS;
3477 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3480 struct i40e_mac_filter *f;
3481 struct i40e_macvlan_filter *mv_f;
3482 int ret = I40E_SUCCESS;
3484 if (vsi == NULL || vsi->mac_num == 0)
3485 return I40E_ERR_PARAM;
3487 /* Case that no vlan is set */
3488 if (vsi->vlan_num == 0)
3491 num = vsi->mac_num * vsi->vlan_num;
3493 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3495 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3496 return I40E_ERR_NO_MEMORY;
3500 if (vsi->vlan_num == 0) {
3501 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3502 (void)rte_memcpy(&mv_f[i].macaddr,
3503 &f->macaddr, ETH_ADDR_LEN);
3504 mv_f[i].vlan_id = 0;
3508 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3509 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
3510 vsi->vlan_num, &f->macaddr);
3511 if (ret != I40E_SUCCESS)
3517 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3525 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3527 struct i40e_macvlan_filter *mv_f;
3529 int ret = I40E_SUCCESS;
3531 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3532 return I40E_ERR_PARAM;
3534 /* If it's already set, just return */
3535 if (i40e_find_vlan_filter(vsi, vlan))
3536 return I40E_SUCCESS;
3538 mac_num = vsi->mac_num;
3541 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3542 return I40E_ERR_PARAM;
3545 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3548 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3549 return I40E_ERR_NO_MEMORY;
3552 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3554 if (ret != I40E_SUCCESS)
3557 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3559 if (ret != I40E_SUCCESS)
3562 i40e_set_vlan_filter(vsi, vlan, 1);
3572 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3574 struct i40e_macvlan_filter *mv_f;
3576 int ret = I40E_SUCCESS;
3579 * Vlan 0 is the generic filter for untagged packets
3580 * and can't be removed.
3582 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3583 return I40E_ERR_PARAM;
3585 /* If it can't be found, just return */
3586 if (!i40e_find_vlan_filter(vsi, vlan))
3587 return I40E_ERR_PARAM;
3589 mac_num = vsi->mac_num;
3592 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3593 return I40E_ERR_PARAM;
3596 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3599 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3600 return I40E_ERR_NO_MEMORY;
3603 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3605 if (ret != I40E_SUCCESS)
3608 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3610 if (ret != I40E_SUCCESS)
3613 /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
3614 if (vsi->vlan_num == 1) {
3615 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3616 if (ret != I40E_SUCCESS)
3619 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3620 if (ret != I40E_SUCCESS)
3624 i40e_set_vlan_filter(vsi, vlan, 0);
3634 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3636 struct i40e_mac_filter *f;
3637 struct i40e_macvlan_filter *mv_f;
3639 int ret = I40E_SUCCESS;
3641 /* If the MAC has already been configured, just return */
3642 f = i40e_find_mac_filter(vsi, addr);
3644 return I40E_SUCCESS;
3647 * If vlan_num is 0, this is the first time a MAC is added;
3648 * set the filter mask for vlan_id 0.
3650 if (vsi->vlan_num == 0) {
3651 i40e_set_vlan_filter(vsi, 0, 1);
3655 vlan_num = vsi->vlan_num;
3657 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3659 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3660 return I40E_ERR_NO_MEMORY;
3663 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3664 if (ret != I40E_SUCCESS)
3667 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3668 if (ret != I40E_SUCCESS)
3671 /* Add the mac addr into mac list */
3672 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3674 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3675 ret = I40E_ERR_NO_MEMORY;
3678 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3679 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3690 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3692 struct i40e_mac_filter *f;
3693 struct i40e_macvlan_filter *mv_f;
3695 int ret = I40E_SUCCESS;
3697 /* Can't find it, return an error */
3698 f = i40e_find_mac_filter(vsi, addr);
3700 return I40E_ERR_PARAM;
3702 vlan_num = vsi->vlan_num;
3703 if (vlan_num == 0) {
3704 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
3705 return I40E_ERR_PARAM;
3707 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3709 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3710 return I40E_ERR_NO_MEMORY;
3713 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3714 if (ret != I40E_SUCCESS)
3717 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3718 if (ret != I40E_SUCCESS)
3721 /* Remove the MAC address from the MAC list */
3722 TAILQ_REMOVE(&vsi->mac_list, f, next);
3732 /* Configure hash enable flags for RSS */
3734 i40e_config_hena(uint64_t flags)
3741 if (flags & ETH_RSS_NONF_IPV4_UDP)
3742 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3743 if (flags & ETH_RSS_NONF_IPV4_TCP)
3744 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3745 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3746 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3747 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3748 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3749 if (flags & ETH_RSS_FRAG_IPV4)
3750 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3751 if (flags & ETH_RSS_NONF_IPV6_UDP)
3752 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3753 if (flags & ETH_RSS_NONF_IPV6_TCP)
3754 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3755 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3756 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3757 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3758 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3759 if (flags & ETH_RSS_FRAG_IPV6)
3760 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3761 if (flags & ETH_RSS_L2_PAYLOAD)
3762 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
3767 /* Parse the hash enable flags */
3769 i40e_parse_hena(uint64_t flags)
3771 uint64_t rss_hf = 0;
3776 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3777 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3778 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3779 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3780 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3781 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3782 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3783 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3784 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3785 rss_hf |= ETH_RSS_FRAG_IPV4;
3786 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3787 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3788 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3789 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3790 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3791 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3792 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3793 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3794 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3795 rss_hf |= ETH_RSS_FRAG_IPV6;
3796 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3797 rss_hf |= ETH_RSS_L2_PAYLOAD;
3804 i40e_pf_disable_rss(struct i40e_pf *pf)
3806 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
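/*
 * The 64-bit hash-enable (HENA) bit field is split across two 32-bit
 * registers; read both halves, clear the RSS-related bits, then write
 * the two halves back.
 */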
3809 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3810 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3811 hena &= ~I40E_RSS_HENA_ALL;
3812 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3813 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3814 I40E_WRITE_FLUSH(hw);
3818 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
3821 uint8_t hash_key_len;
3826 hash_key = (uint32_t *)(rss_conf->rss_key);
3827 hash_key_len = rss_conf->rss_key_len;
3828 if (hash_key != NULL && hash_key_len >=
3829 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3830 /* Fill in RSS hash key */
3831 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3832 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
3835 rss_hf = rss_conf->rss_hf;
3836 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3837 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3838 hena &= ~I40E_RSS_HENA_ALL;
3839 hena |= i40e_config_hena(rss_hf);
3840 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3841 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3842 I40E_WRITE_FLUSH(hw);
3848 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
3849 struct rte_eth_rss_conf *rss_conf)
3851 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3852 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
3855 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3856 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3857 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
3858 if (rss_hf != 0) /* Enable RSS */
3860 return 0; /* Nothing to do */
3863 if (rss_hf == 0) /* Disable RSS */
3866 return i40e_hw_rss_hash_set(hw, rss_conf);
3870 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3871 struct rte_eth_rss_conf *rss_conf)
3873 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3874 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
3878 if (hash_key != NULL) {
3879 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3880 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
3881 rss_conf->rss_key_len = i * sizeof(uint32_t);
3883 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3884 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3885 rss_conf->rss_hf = i40e_parse_hena(hena);
3892 i40e_pf_config_rss(struct i40e_pf *pf)
3894 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3895 struct rte_eth_rss_conf rss_conf;
3896 uint32_t i, lut = 0;
3897 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
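/*
 * Fill the RSS lookup table with queue indices, packing four 8-bit
 * entries into each 32-bit HLUT register (register index is i >> 2).
 */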
3899 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
3902 lut = (lut << 8) | (j & ((0x1 <<
3903 hw->func_caps.rss_table_entry_width) - 1));
3905 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
3908 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
3909 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
3910 i40e_pf_disable_rss(pf);
3913 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
3914 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3915 /* Generate a random default hash key */
3916 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3917 rss_key_default[i] = (uint32_t)rte_rand();
3918 rss_conf.rss_key = (uint8_t *)rss_key_default;
3919 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3923 return i40e_hw_rss_hash_set(hw, &rss_conf);
3927 i40e_pf_config_mq_rx(struct i40e_pf *pf)
3929 if (!pf->dev_data->sriov.active) {
3930 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
3932 i40e_pf_config_rss(pf);
3935 i40e_pf_disable_rss(pf);
3944 i40e_disable_queue(struct i40e_hw *hw, uint16_t q_idx)
3949 /* Disable TX queue */
3950 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3951 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3952 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3953 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 0x1)))
3955 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3957 if (i >= I40E_CHK_Q_ENA_COUNT) {
3958 PMD_DRV_LOG(ERR, "Failed to disable "
3959 "tx queue[%u]\n", q_idx);
3960 return I40E_ERR_TIMEOUT;
3963 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3964 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3965 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3966 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3967 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3968 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3969 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3970 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3973 if (i >= I40E_CHK_Q_ENA_COUNT) {
3974 PMD_DRV_LOG(ERR, "Failed to disable "
3975 "tx queue[%u]\n", q_idx);
3976 return I40E_ERR_TIMEOUT;
3980 /* Disable RX queue */
3981 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3982 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3983 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3984 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3986 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3988 if (i >= I40E_CHK_Q_ENA_COUNT) {
3989 PMD_DRV_LOG(ERR, "Failed to disable "
3990 "rx queue[%u]\n", q_idx);
3991 return I40E_ERR_TIMEOUT;
3994 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3995 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3996 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3997 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3998 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3999 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
4000 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
4001 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
4004 if (i >= I40E_CHK_Q_ENA_COUNT) {
4005 PMD_DRV_LOG(ERR, "Failed to disable "
4006 "rx queue[%u]\n", q_idx);
4007 return I40E_ERR_TIMEOUT;
4011 return I40E_SUCCESS;
4015 i40e_pf_disable_all_queues(struct i40e_hw *hw)
4018 uint16_t firstq, lastq, maxq, i;
4020 reg = I40E_READ_REG(hw, I40E_PFLAN_QALLOC);
4021 if (!(reg & I40E_PFLAN_QALLOC_VALID_MASK)) {
4022 PMD_DRV_LOG(INFO, "PF queue allocation is invalid\n");
4023 return I40E_ERR_PARAM;
4025 firstq = reg & I40E_PFLAN_QALLOC_FIRSTQ_MASK;
4026 lastq = (reg & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4027 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4028 maxq = lastq - firstq;
4029 for (i = 0; i <= maxq; i++) {
4030 ret = i40e_disable_queue(hw, i);
4031 if (ret != I40E_SUCCESS)
4034 return I40E_SUCCESS;