/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include "i40e_logs.h"
#include "i40e/i40e_register_x710_int.h"
#include "i40e/i40e_prototype.h"
#include "i40e/i40e_adminq_cmd.h"
#include "i40e/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Bit shift and mask */
#define I40E_16_BIT_SHIFT 16
#define I40E_16_BIT_MASK  0xFFFF
#define I40E_32_BIT_SHIFT 32
#define I40E_32_BIT_MASK  0xFFFFFFFF
#define I40E_48_BIT_SHIFT 48
#define I40E_48_BIT_MASK  0xFFFFFFFFFFFFULL
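/*
 * The 48-bit shift/mask pair exists because most i40e traffic counters
 * are 48 bits wide, split across a low/high register pair (for example
 * I40E_GLPRT_GORCL/I40E_GLPRT_GORCH read below). i40e_stat_update_48()
 * combines the pair and masks the result to 48 bits before computing the
 * delta against the saved offset, so the counter wraps at 2^48:
 * ((1ULL << 48) | 0x5) & I40E_48_BIT_MASK == 0x5.
 */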
/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32   /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
#define I40E_RSS_OFFLOAD_ALL ( \
	ETH_RSS_NONF_IPV4_UDP | \
	ETH_RSS_NONF_IPV4_TCP | \
	ETH_RSS_NONF_IPV4_SCTP | \
	ETH_RSS_NONF_IPV4_OTHER | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONF_IPV6_UDP | \
	ETH_RSS_NONF_IPV6_TCP | \
	ETH_RSS_NONF_IPV6_SCTP | \
	ETH_RSS_NONF_IPV6_OTHER | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_L2_PAYLOAD)
/* All bits of RSS hash enable */
#define I40E_RSS_HENA_ALL ( \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	(1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_RX) | \
	(1ULL << I40E_FILTER_PCTYPE_FCOE_OTHER) | \
	(1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
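/*
 * Each bit in this hash-enable (HENA) bitmap corresponds to one hardware
 * packet classifier type (PCTYPE); setting a bit makes the device compute
 * an RSS hash for packets matching that PCTYPE. The 64-bit value built
 * here is what the RSS configuration code writes out as two 32-bit halves
 * of the PFQF_HENA register pair.
 */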
static int eth_i40e_dev_init(\
			__attribute__((unused)) struct eth_driver *eth_drv,
			struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue, int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
			     uint32_t index, uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta *reta_conf);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta *reta_conf);
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_vsi_init(struct i40e_vsi *vsi);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
		uint32_t hireg, uint32_t loreg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			      uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			       uint16_t num);
static int i40e_vsi_init_vlan(struct i40e_vsi *vsi);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static int i40e_pf_disable_all_queues(struct i40e_hw *hw);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num, uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];

static struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
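/*
 * X-macro pattern: rte_pci_dev_ids.h invokes RTE_PCI_DEV_ID_DECL_I40E()
 * once per known i40e vendor/device ID pair, so the table above expands
 * to one RTE_PCI_DEVICE() entry per supported adapter plus the zeroed
 * sentinel that terminates the list.
 */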
static struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure = i40e_dev_configure,
	.dev_start = i40e_dev_start,
	.dev_stop = i40e_dev_stop,
	.dev_close = i40e_dev_close,
	.promiscuous_enable = i40e_dev_promiscuous_enable,
	.promiscuous_disable = i40e_dev_promiscuous_disable,
	.allmulticast_enable = i40e_dev_allmulticast_enable,
	.allmulticast_disable = i40e_dev_allmulticast_disable,
	.link_update = i40e_dev_link_update,
	.stats_get = i40e_dev_stats_get,
	.stats_reset = i40e_dev_stats_reset,
	.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get = i40e_dev_info_get,
	.vlan_filter_set = i40e_vlan_filter_set,
	.vlan_tpid_set = i40e_vlan_tpid_set,
	.vlan_offload_set = i40e_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
	.vlan_pvid_set = i40e_vlan_pvid_set,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.dev_led_on = i40e_dev_led_on,
	.dev_led_off = i40e_dev_led_off,
	.flow_ctrl_set = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
	.mac_addr_add = i40e_macaddr_add,
	.mac_addr_remove = i40e_macaddr_remove,
	.reta_update = i40e_dev_rss_reta_update,
	.reta_query = i40e_dev_rss_reta_query,
	.rss_hash_update = i40e_dev_rss_hash_update,
	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
};
static struct eth_driver rte_i40e_pmd = {
	{
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};
static inline int
i40e_prev_power_of_2(int n)
{
	/*
	 * Body reconstructed from the callers below, which use this to
	 * round a queue count down to a power of two: return the largest
	 * power of two not greater than n (assumes n > 0).
	 */
	int p = 1;

	while ((p << 1) <= n)
		p <<= 1;

	return p;
}
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
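/*
 * Both helpers above rely on rte_atomic64_cmpset() to replace the whole
 * 64-bit rte_eth_link struct in one shot, so a concurrent reader never
 * observes a half-updated link status. The cmpset returns 0 (failure)
 * only if another writer changed *dst between the read and the swap.
 */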
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}
static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
static int
eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
		  struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
		return 0;
	}

	pci_dev = dev->pci_dev;
	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL\n");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	/* Disable all queues before PF reset, as required */
	ret = i40e_pf_disable_all_queues(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to disable queues %u\n", ret);
		return ret;
	}

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code */
	ret = i40e_init_shared_code(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init shared code: %d", ret);
		return ret;
	}

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
		     "%02d.%02d.%02d eetrack %04x\n",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp\n");

	i40e_clear_pxe_mode(hw);

	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool\n");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				 hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool\n");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	vsi = pf->main_vsi;
	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
			     "for storing mac address");
		goto err_get_mac_addr;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
				   i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	return 0;

err_setup_pf_switch:
	rte_free(pf->main_vsi);
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_vsi_init_vlan(vsi);

	return ret;
}
static void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
			       msix_vect - 1), 0);
	} else {
		uint32_t reg;

		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}

	I40E_WRITE_FLUSH(hw);
}
static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return (uint16_t)(interval / 2);
}
static void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	uint32_t val;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t interval = i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
	int i;

	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
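	/*
	 * The hardware chains a vector's queues into a linked list:
	 * PFINT_LNKLSTN (or VPINT_LNKLSTN for a VF) holds the first queue
	 * index, each QINT_RQCTL.NEXTQ_INDX points at the following queue,
	 * and the final entry is terminated by writing the all-ones
	 * NEXTQ_INDX_MASK, which is what the loop below builds.
	 */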
	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			(interval << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			((vsi->base_queue + i + 1) <<
			 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
			(vsi->base_queue << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
			       msix_vect - 1), interval);

		/* Disable auto-mask on enabling of all non-zero interrupts */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf excludes irq0 */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
			(vsi->base_queue << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			(0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(\
			RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
		       I40E_PFINT_DYN_CTLN_INTENA_MASK |
		       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
		       (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
	I40E_WRITE_FLUSH(hw);
}
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	/* Initialize VSI */
	ret = i40e_vsi_init(vsi);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init VSI\n");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(vsi);
	i40e_vsi_enable_queues_intr(vsi);

	/* Enable all queues which have been configured */
	ret = i40e_vsi_switch_queues(vsi, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI\n");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to set vsi broadcast\n");
	}

	return I40E_SUCCESS;

err_up:
	i40e_vsi_switch_queues(vsi, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	/* Disable all queues */
	i40e_vsi_switch_queues(vsi, FALSE);

	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(vsi);
	i40e_vsi_queues_unbind_intr(vsi);
}
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
		       (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous\n");
}

static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     FALSE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous\n");
}

static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous\n");
}

static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
						    vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous\n");
}
static int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	/* Get link status information from hardware */
	status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
	if (status != I40E_SUCCESS) {
		link.link_speed = ETH_LINK_SPEED_100;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		PMD_DRV_LOG(ERR, "Failed to get link info\n");
		goto out;
	}

	link.link_status = link_status.link_info & I40E_AQ_LINK_UP;

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Get all the statistics of a VSI */
static void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** VSI[%u] stats start *******************\n",
	       vsi->vsi_id);
	printf("rx_bytes: %lu\n", nes->rx_bytes);
	printf("rx_unicast: %lu\n", nes->rx_unicast);
	printf("rx_multicast: %lu\n", nes->rx_multicast);
	printf("rx_broadcast: %lu\n", nes->rx_broadcast);
	printf("rx_discards: %lu\n", nes->rx_discards);
	printf("rx_unknown_protocol: %lu\n", nes->rx_unknown_protocol);
	printf("tx_bytes: %lu\n", nes->tx_bytes);
	printf("tx_unicast: %lu\n", nes->tx_unicast);
	printf("tx_multicast: %lu\n", nes->tx_multicast);
	printf("tx_broadcast: %lu\n", nes->tx_broadcast);
	printf("tx_discards: %lu\n", nes->tx_discards);
	printf("tx_errors: %lu\n", nes->tx_errors);
	printf("***************** VSI[%u] stats end *******************\n",
	       vsi->vsi_id);
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
}
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
			    pf->offset_loaded, &os->eth.tx_discards,
			    &ns->eth.tx_discards);
	/* GLPRT_TEPC not supported */
	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;
	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
			  ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = ns->eth.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors;
	stats->imcasts = ns->eth.rx_multicast;

	i40e_update_vsi_stats(pf->main_vsi);
#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
	printf("***************** PF stats start *******************\n");
	printf("rx_bytes: %lu\n", ns->eth.rx_bytes);
	printf("rx_unicast: %lu\n", ns->eth.rx_unicast);
	printf("rx_multicast: %lu\n", ns->eth.rx_multicast);
	printf("rx_broadcast: %lu\n", ns->eth.rx_broadcast);
	printf("rx_discards: %lu\n", ns->eth.rx_discards);
	printf("rx_unknown_protocol: %lu\n", ns->eth.rx_unknown_protocol);
	printf("tx_bytes: %lu\n", ns->eth.tx_bytes);
	printf("tx_unicast: %lu\n", ns->eth.tx_unicast);
	printf("tx_multicast: %lu\n", ns->eth.tx_multicast);
	printf("tx_broadcast: %lu\n", ns->eth.tx_broadcast);
	printf("tx_discards: %lu\n", ns->eth.tx_discards);
	printf("tx_errors: %lu\n", ns->eth.tx_errors);

	printf("tx_dropped_link_down: %lu\n", ns->tx_dropped_link_down);
	printf("crc_errors: %lu\n", ns->crc_errors);
	printf("illegal_bytes: %lu\n", ns->illegal_bytes);
	printf("error_bytes: %lu\n", ns->error_bytes);
	printf("mac_local_faults: %lu\n", ns->mac_local_faults);
	printf("mac_remote_faults: %lu\n", ns->mac_remote_faults);
	printf("rx_length_errors: %lu\n", ns->rx_length_errors);
	printf("link_xon_rx: %lu\n", ns->link_xon_rx);
	printf("link_xoff_rx: %lu\n", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_rx[%d]: %lu\n",
		       i, ns->priority_xon_rx[i]);
		printf("priority_xoff_rx[%d]: %lu\n",
		       i, ns->priority_xoff_rx[i]);
	}
	printf("link_xon_tx: %lu\n", ns->link_xon_tx);
	printf("link_xoff_tx: %lu\n", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		printf("priority_xon_tx[%d]: %lu\n",
		       i, ns->priority_xon_tx[i]);
		printf("priority_xoff_tx[%d]: %lu\n",
		       i, ns->priority_xoff_tx[i]);
		printf("priority_xon_2_xoff[%d]: %lu\n",
		       i, ns->priority_xon_2_xoff[i]);
	}
	printf("rx_size_64: %lu\n", ns->rx_size_64);
	printf("rx_size_127: %lu\n", ns->rx_size_127);
	printf("rx_size_255: %lu\n", ns->rx_size_255);
	printf("rx_size_511: %lu\n", ns->rx_size_511);
	printf("rx_size_1023: %lu\n", ns->rx_size_1023);
	printf("rx_size_1522: %lu\n", ns->rx_size_1522);
	printf("rx_size_big: %lu\n", ns->rx_size_big);
	printf("rx_undersize: %lu\n", ns->rx_undersize);
	printf("rx_fragments: %lu\n", ns->rx_fragments);
	printf("rx_oversize: %lu\n", ns->rx_oversize);
	printf("rx_jabber: %lu\n", ns->rx_jabber);
	printf("tx_size_64: %lu\n", ns->tx_size_64);
	printf("tx_size_127: %lu\n", ns->tx_size_127);
	printf("tx_size_255: %lu\n", ns->tx_size_255);
	printf("tx_size_511: %lu\n", ns->tx_size_511);
	printf("tx_size_1023: %lu\n", ns->tx_size_1023);
	printf("tx_size_1522: %lu\n", ns->tx_size_1522);
	printf("tx_size_big: %lu\n", ns->tx_size_big);
	printf("mac_short_packet_dropped: %lu\n",
	       ns->mac_short_packet_dropped);
	printf("checksum_error: %lu\n", ns->checksum_error);
	printf("***************** PF stats end ********************\n");
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
}
/* Reset the statistics */
static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* It results in reloading the start point of each counter */
	pf->offset_loaded = false;
}

static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM;
}
static int
i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	PMD_INIT_FUNC_TRACE();

	if (on)
		return i40e_vsi_add_vlan(vsi, vlan_id);
	else
		return i40e_vsi_delete_vlan(vsi, vlan_id);
}

static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
{
	PMD_INIT_FUNC_TRACE();
}

static void
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			i40e_vsi_config_vlan_stripping(vsi, TRUE);
		else
			i40e_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			i40e_vsi_config_double_vlan(vsi, TRUE);
		else
			i40e_vsi_config_double_vlan(vsi, FALSE);
	}
}

static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
static int
i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_vsi_context ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (on) {
		/*
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
			      I40E_AQ_VSI_PVLAN_MODE_TAGGED;
	} else {
		if (data->dev_conf.txmode.hw_vlan_reject_tagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
		if (data->dev_conf.txmode.hw_vlan_reject_untagged == 0)
			vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
	}

	vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
				       I40E_AQ_VSI_PVLAN_MODE_MASK);
	vsi->info.port_vlan_flags |= vlan_flags;
	vsi->info.pvid = pvid;
	vsi->info.valid_sections =
		rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.seid = vsi->seid;
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "Failed to update VSI params\n");

	return ret;
}
static int
i40e_dev_led_on(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode == 0)
		i40e_led_set(hw, 0xf, true); /* 0xf means led always on */

	return 0;
}

static int
i40e_dev_led_off(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode != 0)
		i40e_led_set(hw, 0, false);

	return 0;
}

static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}

static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
/* Add a MAC address, and update filters */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __attribute__((unused)) uint32_t index,
		 __attribute__((unused)) uint32_t pool)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct ether_addr old_mac;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
		return;
	}

	if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent mac address\n");
		return;
	}

	/* Write mac address */
	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					mac_addr->addr_bytes, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
		return;
	}

	(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
	(void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
			 ETHER_ADDR_LEN);

	ret = i40e_vsi_add_mac(vsi, mac_addr);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
		return;
	}

	ether_addr_copy(mac_addr, &pf->dev_addr);
	i40e_vsi_delete_mac(vsi, &old_mac);
}
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct ether_addr *macaddr;
	int ret;
	struct i40e_hw *hw =
		I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (index >= vsi->max_macaddrs)
		return;

	macaddr = &(data->mac_addrs[index]);
	if (!is_valid_assigned_ether_addr(macaddr))
		return;

	ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
					hw->mac.perm_addr, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to write mac address\n");
		return;
	}

	(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);

	ret = i40e_vsi_delete_mac(vsi, macaddr);
	if (ret != I40E_SUCCESS)
		return;

	/* Clear device address as it has been removed */
	if (is_same_ether_addr(&(pf->dev_addr), macaddr))
		memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
}
static int
i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut, l;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
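	/*
	 * Each 32-bit PFQF_HLUT register packs four single-byte RETA
	 * entries, so the loop walks the table four entries at a time
	 * (register index i >> 2) and the per-entry mask bits select
	 * which of the four bytes to overwrite.
	 */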
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		if (mask == 0xF)
			l = 0;
		else
			l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));

		for (j = 0, lut = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				lut |= reta_conf->reta[i + j] << (8 * j);
			else
				lut |= l & (0xFF << (8 * j));
		}
		I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	return 0;
}
static int
i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta *reta_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t lut;
	uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;

	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
		if (i < max)
			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
		else
			mask = (uint8_t)((reta_conf->mask_hi >>
						(i - max)) & 0xF);

		if (!mask)
			continue;

		lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
		for (j = 0; j < 4; j++) {
			if (mask & (0x1 << j))
				reta_conf->reta[i + j] =
					(uint8_t)((lut >> (8 * j)) & 0xFF);
		}
	}

	return 0;
}
/**
 * i40e_allocate_dma_mem_d - specific memory alloc for shared code
 * @hw:        pointer to the HW structure
 * @mem:       pointer to mem struct to fill out
 * @size:      size of memory requested
 * @alignment: what to align the allocation to
 **/
enum i40e_status_code
i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			struct i40e_dma_mem *mem,
			u64 size,
			u32 alignment)
{
	static uint64_t id = 0;
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];

	if (!mem)
		return I40E_ERR_PARAM;

	id++;
	rte_snprintf(z_name, sizeof(z_name), "i40e_dma_%lu", id);
	mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
	if (!mz)
		return I40E_ERR_NO_MEMORY;

	mem->id = id;
	mem->size = size;
	mem->va = mz->addr;
	mem->pa = mz->phys_addr;

	return I40E_SUCCESS;
}
/**
 * i40e_free_dma_mem_d - specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum i40e_status_code
i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		    struct i40e_dma_mem *mem)
{
	if (!mem || !mem->va)
		return I40E_ERR_PARAM;

	/* Memzones cannot be freed; just clear the pointers */
	mem->va = NULL;
	mem->pa = (u64)0;

	return I40E_SUCCESS;
}
/**
 * i40e_allocate_virt_mem_d - specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  pointer to mem struct to fill out
 * @size: size of memory requested
 **/
enum i40e_status_code
i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
			 struct i40e_virt_mem *mem,
			 u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = rte_zmalloc("i40e", size, 0);

	if (mem->va)
		return I40E_SUCCESS;
	else
		return I40E_ERR_NO_MEMORY;
}
/**
 * i40e_free_virt_mem_d - specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: pointer to mem struct to free
 **/
enum i40e_status_code
i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
		     struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	rte_free(mem->va);
	mem->va = NULL;

	return I40E_SUCCESS;
}
void
i40e_init_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_init(&sp->spinlock);
}

void
i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_lock(&sp->spinlock);
}

void
i40e_release_spinlock_d(struct i40e_spinlock *sp)
{
	rte_spinlock_unlock(&sp->spinlock);
}

void
i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
{
	return;
}
/*
 * Get the hardware capabilities, which will be parsed
 * and saved into struct i40e_hw.
 */
static int
i40e_get_cap(struct i40e_hw *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	uint16_t len, size = 0;
	int ret;

	/* Calculate a buffer large enough to hold the response data */
	len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
					I40E_MAX_CAP_ELE_NUM;
	buf = rte_zmalloc("i40e", len, 0);
	if (!buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
		return I40E_ERR_NO_MEMORY;
	}

	/* Get, parse the capabilities and save it to hw */
	ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
			i40e_aqc_opc_list_func_capabilities, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to discover capabilities\n");

	/* Free the temporary buffer after being used */
	rte_free(buf);

	return ret;
}
static int
i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t sum_queues = 0, sum_vsis;

	/* First check if FW support SRIOV */
	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV\n");
		return -EINVAL;
	}

	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
	PMD_INIT_LOG(INFO, "Max supported VSIs:%u\n", pf->max_num_vsi);
	/* Allocate queues for pf */
	if (hw->func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS;
		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
		pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
	} else
		pf->lan_nb_qps = 1;
	sum_queues = pf->lan_nb_qps;
	/* Default VSI is not counted in */
	sum_vsis = 0;
	PMD_INIT_LOG(INFO, "PF queue pairs:%u\n", pf->lan_nb_qps);

	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
		pf->flags |= I40E_FLAG_SRIOV;
		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
			PMD_INIT_LOG(ERR, "Config VF number %u, "
				     "max supported %u.\n",
				     dev->pci_dev->max_vfs,
				     hw->func_caps.num_vfs);
			return -EINVAL;
		}
		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
				     "max support %u queues.\n",
				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
			return -EINVAL;
		}
		pf->vf_num = dev->pci_dev->max_vfs;
		sum_queues += pf->vf_nb_qps * pf->vf_num;
		sum_vsis += pf->vf_num;
		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
			     pf->vf_num, pf->vf_nb_qps);
	}

	if (hw->func_caps.vmdq) {
		pf->flags |= I40E_FLAG_VMDQ;
		pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
		sum_queues += pf->vmdq_nb_qps;
		sum_vsis += 1;
		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u\n", pf->vmdq_nb_qps);
	}

	if (hw->func_caps.fd) {
		pf->flags |= I40E_FLAG_FDIR;
		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
		/*
		 * Each flow director consumes one VSI and one queue,
		 * but that cannot be predicted here.
		 */
	}

	if (sum_vsis > pf->max_num_vsi ||
	    sum_queues > hw->func_caps.num_rx_qp) {
		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
			     pf->max_num_vsi, sum_vsis);
		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
			     hw->func_caps.num_rx_qp, sum_queues);
		return -EINVAL;
	}

	/*
	 * Each VSI occupies at least one MSIX interrupt,
	 * plus IRQ0 for the misc interrupt cause.
	 */
	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
			     sum_vsis, hw->func_caps.num_msix_vectors);
		return -EINVAL;
	}

	return I40E_SUCCESS;
}
static int
i40e_pf_get_switch_config(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_aqc_get_switch_config_resp *switch_config;
	struct i40e_aqc_switch_config_element_resp *element;
	uint16_t start_seid = 0, num_reported;
	int ret = I40E_SUCCESS;

	switch_config = (struct i40e_aqc_get_switch_config_resp *)\
			rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
	if (!switch_config) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	/* Get the switch configurations */
	ret = i40e_aq_get_switch_config(hw, switch_config,
					I40E_AQ_LARGE_BUF, &start_seid, NULL);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to get switch configurations\n");
		goto fail;
	}
	num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
	if (num_reported != 1) { /* The number should be 1 */
		PMD_DRV_LOG(ERR, "Wrong number of switch config reported\n");
		goto fail;
	}

	/* Parse the switch configuration elements */
	element = &(switch_config->element[0]);
	if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
		pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
		pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
	} else
		PMD_DRV_LOG(INFO, "Unknown element type\n");

fail:
	rte_free(switch_config);

	return ret;
}
static int
i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
		    uint32_t num)
{
	struct pool_entry *entry;

	if (pool == NULL || num == 0)
		return -EINVAL;

	entry = rte_zmalloc("i40e", sizeof(*entry), 0);
	if (entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
			    "resource pool\n");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static void
i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
{
	struct pool_entry *entry;

	if (pool == NULL)
		return;

	LIST_FOREACH(entry, &pool->alloc_list, next) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	LIST_FOREACH(entry, &pool->free_list, next) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static int
i40e_res_pool_free(struct i40e_res_pool_info *pool,
		   uint32_t base)
{
	struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
	uint32_t pool_offset;
	int insert;

	if (pool == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter\n");
		return -EINVAL;
	}

	pool_offset = base - pool->base;
	/* Lookup in alloc list */
	LIST_FOREACH(entry, &pool->alloc_list, next) {
		if (entry->base == pool_offset) {
			valid_entry = entry;
			LIST_REMOVE(entry, next);
			break;
		}
	}

	/* Not found, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "Failed to find entry\n");
		return -EINVAL;
	}

	/*
	 * Found it, move it to free list and try to merge.
	 * In order to make merging easier, always sort by qbase.
	 * Find the adjacent prev and next entries.
	 */
	prev = next = NULL;
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->base > valid_entry->base) {
			next = entry;
			break;
		}
		prev = entry;
	}

	insert = 0;
	/* Try to merge with next one */
	if (next != NULL) {
		/* Merge with next one */
		if (valid_entry->base + valid_entry->len == next->base) {
			next->base = valid_entry->base;
			next->len += valid_entry->len;
			rte_free(valid_entry);
			valid_entry = next;
			insert = 1;
		}
	}

	if (prev != NULL) {
		/* Merge with previous one */
		if (prev->base + prev->len == valid_entry->base) {
			prev->len += valid_entry->len;
			/* If it merged with the next one, remove that node */
			if (insert == 1) {
				LIST_REMOVE(valid_entry, next);
				rte_free(valid_entry);
			} else {
				rte_free(valid_entry);
				insert = 1;
			}
			valid_entry = prev;
		}
	}

	/* No entry to merge with, insert it */
	if (insert == 0) {
		if (prev != NULL)
			LIST_INSERT_AFTER(prev, valid_entry, next);
		else if (next != NULL)
			LIST_INSERT_BEFORE(next, valid_entry, next);
		else /* It's empty list, insert to head */
			LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
	}

	pool->num_free += valid_entry->len;
	pool->num_alloc -= valid_entry->len;

	return 0;
}
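/*
 * Example of the merge logic in i40e_res_pool_free(): with free entries
 * [0,8) and [12,16), freeing the allocated block [8,12) first merges
 * forward into [8,16), then backward into [0,16), leaving the free list
 * with a single entry and no fragmentation.
 */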
static int
i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
		    uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (pool == NULL || num == 0) {
		PMD_DRV_LOG(ERR, "Invalid parameter\n");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
			    num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Lookup in free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit is the best */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (valid_entry == NULL ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (valid_entry == NULL) {
		PMD_DRV_LOG(ERR, "No valid entry found\n");
		return -ENOMEM;
	}

	/*
	 * The entry has exactly the number of queues requested;
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/*
		 * The entry has more queues than requested; create a new
		 * entry for the alloc_list and subtract the allocated base
		 * and number from the free_list entry.
		 */
		entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
		if (entry == NULL) {
			PMD_DRV_LOG(ERR, "Failed to allocate memory for "
				    "resource pool\n");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return (valid_entry->base + pool->base);
}
/**
 * bitmap_is_subset - Check whether src2 is a subset of src1
 **/
static inline int
bitmap_is_subset(uint8_t src1, uint8_t src2)
{
	return !((src1 ^ src2) & src2);
}
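/*
 * bitmap_is_subset() truth example: src1 = 0x07, src2 = 0x05 gives
 * (src1 ^ src2) & src2 = 0x02 & 0x05 = 0, i.e. a subset; src1 = 0x03,
 * src2 = 0x05 gives 0x06 & 0x05 = 0x04 != 0, i.e. not a subset (bit 2
 * of src2 is not present in src1).
 */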
static enum i40e_status_code
validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	/* If DCB is not supported, only default TC is supported */
	if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
		PMD_DRV_LOG(ERR, "DCB is not enabled, "
			    "only TC0 is supported\n");
		return I40E_NOT_SUPPORTED;
	}

	if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
		PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
			    "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
			    enabled_tcmap);
		return I40E_NOT_SUPPORTED;
	}

	return I40E_SUCCESS;
}
2042 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2044 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2046 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2048 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2049 if (ret != I40E_SUCCESS)
2053 PMD_DRV_LOG(ERR, "seid not valid\n");
2057 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2058 tc_bw_data.tc_valid_bits = enabled_tcmap;
2059 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2060 tc_bw_data.tc_bw_credits[i] =
2061 (enabled_tcmap & (1 << i)) ? 1 : 0;
2063 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2064 if (ret != I40E_SUCCESS) {
2065 PMD_DRV_LOG(ERR, "Failed to configure TC BW\n");
2069 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2070 sizeof(vsi->info.qs_handle));
2071 return I40E_SUCCESS;
2075 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2076 struct i40e_aqc_vsi_properties_data *info,
2077 uint8_t enabled_tcmap)
2079 int ret, total_tc = 0, i;
2080 uint16_t qpnum_per_tc, bsf, qp_idx;
2082 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2083 if (ret != I40E_SUCCESS)
2086 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2087 if (enabled_tcmap & (1 << i))
2089 vsi->enabled_tc = enabled_tcmap;
2091 /* Number of queues per enabled TC */
2092 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2093 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2094 bsf = rte_bsf32(qpnum_per_tc);
2096 /* Adjust the queue count to the actual number of queues that can be applied */
2097 vsi->nb_qps = qpnum_per_tc * total_tc;
2100 * Configure TC and queue mapping parameters. For each enabled TC,
2101 * allocate qpnum_per_tc queues to its traffic; a disabled TC is
2102 * served by the default queue.
2105 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2106 if (vsi->enabled_tc & (1 << i)) {
2107 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2108 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2109 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2110 qp_idx += qpnum_per_tc;
2112 info->tc_mapping[i] = 0;
2115 /* Associate queue number with VSI */
2116 if (vsi->type == I40E_VSI_SRIOV) {
2117 info->mapping_flags |=
2118 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2119 for (i = 0; i < vsi->nb_qps; i++)
2120 info->queue_mapping[i] =
2121 rte_cpu_to_le_16(vsi->base_queue + i);
2123 info->mapping_flags |=
2124 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2125 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2127 info->valid_sections =
2128 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2130 return I40E_SUCCESS;
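/*
 * Worked example for the mapping above (values illustrative, assuming
 * I40E_MAX_Q_PER_TC >= 16): with nb_qps = 48 and enabled_tcmap = 0x3,
 * total_tc = 2 and qpnum_per_tc = i40e_prev_power_of_2(24) = 16, so
 * bsf = 4 and nb_qps is adjusted down to 32. TC0 then maps queues
 * 0..15 (qp_idx = 0) and TC1 maps queues 16..31 (qp_idx = 16), each
 * encoded together with bsf in its tc_mapping entry.
 */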
2134 i40e_veb_release(struct i40e_veb *veb)
2136 struct i40e_vsi *vsi;
2139 if (veb == NULL || veb->associate_vsi == NULL)
2142 if (!TAILQ_EMPTY(&veb->head)) {
2143 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove\n");
2147 vsi = veb->associate_vsi;
2148 hw = I40E_VSI_TO_HW(vsi);
2150 vsi->uplink_seid = veb->uplink_seid;
2151 i40e_aq_delete_element(hw, veb->seid, NULL);
2154 return I40E_SUCCESS;
2158 static struct i40e_veb *
2159 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2161 struct i40e_veb *veb;
2165 if (pf == NULL || vsi == NULL) {
2166 PMD_DRV_LOG(ERR, "veb setup failed, "
2167 "associated VSI shouldn't null\n");
2170 hw = I40E_PF_TO_HW(pf);
2172 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2174 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb\n");
2178 veb->associate_vsi = vsi;
2179 TAILQ_INIT(&veb->head);
2180 veb->uplink_seid = vsi->uplink_seid;
2182 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2183 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2185 if (ret != I40E_SUCCESS) {
2186 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
2187 hw->aq.asq_last_status);
2191 /* get statistics index */
2192 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2193 &veb->stats_idx, NULL, NULL, NULL);
2194 if (ret != I40E_SUCCESS) {
2195 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
2196 hw->aq.asq_last_status);
2200 /* Get VEB bandwidth, to be implemented */
2201 /* The associated VSI now binds to the VEB; set its uplink to this VEB */
2202 vsi->uplink_seid = veb->seid;
2211 i40e_vsi_release(struct i40e_vsi *vsi)
2215 struct i40e_vsi_list *vsi_list;
2217 struct i40e_mac_filter *f;
2220 return I40E_SUCCESS;
2222 pf = I40E_VSI_TO_PF(vsi);
2223 hw = I40E_VSI_TO_HW(vsi);
2225 /* VSI has children attached; release the children first */
2227 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2228 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2230 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2232 i40e_veb_release(vsi->veb);
2235 /* Remove all macvlan filters of the VSI */
2236 i40e_vsi_remove_all_macvlan_filter(vsi);
2237 TAILQ_FOREACH(f, &vsi->mac_list, next)
2240 if (vsi->type != I40E_VSI_MAIN) {
2241 /* Remove vsi from parent's sibling list */
2242 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2243 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL\n");
2244 return I40E_ERR_PARAM;
2246 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2247 &vsi->sib_vsi_list, list);
2249 /* Remove all switch elements of the VSI */
2250 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2251 if (ret != I40E_SUCCESS)
2252 PMD_DRV_LOG(ERR, "Failed to delete element\n");
2254 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2256 if (vsi->type != I40E_VSI_SRIOV)
2257 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2260 return I40E_SUCCESS;
2264 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2266 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2267 struct i40e_aqc_remove_macvlan_element_data def_filter;
2270 if (vsi->type != I40E_VSI_MAIN)
2271 return I40E_ERR_CONFIG;
2272 memset(&def_filter, 0, sizeof(def_filter));
2273 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2275 def_filter.vlan_tag = 0;
2276 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2277 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2278 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2279 if (ret != I40E_SUCCESS)
2282 return i40e_vsi_add_mac(vsi, (struct ether_addr *)(hw->mac.perm_addr));
2286 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2288 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2289 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2290 struct i40e_hw *hw = &vsi->adapter->hw;
2294 memset(&bw_config, 0, sizeof(bw_config));
2295 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2296 if (ret != I40E_SUCCESS) {
2297 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
2298 "configuration %u\n", hw->aq.asq_last_status);
2302 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2303 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2304 &ets_sla_config, NULL);
2305 if (ret != I40E_SUCCESS) {
2306 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2307 "configuration %u\n", hw->aq.asq_last_status);
2311 /* Don't store the info yet, just print it out */
2312 PMD_DRV_LOG(INFO, "VSI bw limit:%u\n", bw_config.port_bw_limit);
2313 PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
2314 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2315 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
2316 ets_sla_config.share_credits[i]);
2317 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
2318 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2319 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2320 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2329 i40e_vsi_setup(struct i40e_pf *pf,
2330 enum i40e_vsi_type type,
2331 struct i40e_vsi *uplink_vsi,
2332 uint16_t user_param)
2334 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2335 struct i40e_vsi *vsi;
2337 struct i40e_vsi_context ctxt;
2338 struct ether_addr broadcast =
2339 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2341 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2342 PMD_DRV_LOG(ERR, "VSI setup failed, "
2343 "VSI link shouldn't be NULL\n");
2347 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2348 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2349 "uplink VSI should be NULL\n");
2353 /* If the uplink VSI hasn't set up a VEB, create one first */
2354 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2355 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2357 if (NULL == uplink_vsi->veb) {
2358 PMD_DRV_LOG(ERR, "VEB setup failed\n");
2363 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2365 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi\n");
2368 TAILQ_INIT(&vsi->mac_list);
2370 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2371 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2372 vsi->parent_vsi = uplink_vsi;
2373 vsi->user_param = user_param;
2374 /* Allocate queues */
2375 switch (vsi->type) {
2376 case I40E_VSI_MAIN:
2377 vsi->nb_qps = pf->lan_nb_qps;
2379 case I40E_VSI_SRIOV:
2380 vsi->nb_qps = pf->vf_nb_qps;
2385 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2387 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2391 vsi->base_queue = ret;
2393 /* VF has MSIX interrupt in VF range, don't allocate here */
2394 if (type != I40E_VSI_SRIOV) {
2395 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2397 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2398 goto fail_queue_alloc;
2400 vsi->msix_intr = ret;
2404 if (type == I40E_VSI_MAIN) {
2405 /* For the main VSI, no need to add it since it's the default one */
2406 vsi->uplink_seid = pf->mac_seid;
2407 vsi->seid = pf->main_vsi_seid;
2408 /* Bind queues with specific MSIX interrupt */
2410 * At least 2 interrupts are needed: one for misc causes, which is
2411 * enabled from the OS side, and another for the queues, bound to
2412 * the interrupt from the device side only.
2415 /* Get default VSI parameters from hardware */
2416 memset(&ctxt, 0, sizeof(ctxt));
2417 ctxt.seid = vsi->seid;
2418 ctxt.pf_num = hw->pf_id;
2419 ctxt.uplink_seid = vsi->uplink_seid;
2421 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2422 if (ret != I40E_SUCCESS) {
2423 PMD_DRV_LOG(ERR, "Failed to get VSI params\n");
2424 goto fail_msix_alloc;
2426 (void)rte_memcpy(&vsi->info, &ctxt.info,
2427 sizeof(struct i40e_aqc_vsi_properties_data));
2428 vsi->vsi_id = ctxt.vsi_number;
2429 vsi->info.valid_sections = 0;
2431 /* Configure TC; only TC0 is enabled */
2432 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2434 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth\n");
2435 goto fail_msix_alloc;
2438 /* TC, queue mapping */
2439 memset(&ctxt, 0, sizeof(ctxt));
2440 vsi->info.valid_sections |=
2441 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2442 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2443 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2444 (void)rte_memcpy(&ctxt.info, &vsi->info,
2445 sizeof(struct i40e_aqc_vsi_properties_data));
2446 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2447 I40E_DEFAULT_TCMAP);
2448 if (ret != I40E_SUCCESS) {
2449 PMD_DRV_LOG(ERR, "Failed to configure "
2450 "TC queue mapping\n");
2451 goto fail_msix_alloc;
2453 ctxt.seid = vsi->seid;
2454 ctxt.pf_num = hw->pf_id;
2455 ctxt.uplink_seid = vsi->uplink_seid;
2458 /* Update VSI parameters */
2459 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2460 if (ret != I40E_SUCCESS) {
2461 PMD_DRV_LOG(ERR, "Failed to update VSI params\n");
2462 goto fail_msix_alloc;
2465 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2466 sizeof(vsi->info.tc_mapping));
2467 (void)rte_memcpy(&vsi->info.queue_mapping,
2468 &ctxt.info.queue_mapping,
2469 sizeof(vsi->info.queue_mapping));
2470 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2471 vsi->info.valid_sections = 0;
2473 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2475 ret = i40e_update_default_filter_setting(vsi);
2476 if (ret != I40E_SUCCESS) {
2477 PMD_DRV_LOG(ERR, "Failed to remove default "
2478 "filter setting\n");
2479 goto fail_msix_alloc;
2482 else if (type == I40E_VSI_SRIOV) {
2483 memset(&ctxt, 0, sizeof(ctxt));
2485 * For other VSIs, the uplink_seid equals the uplink VSI's
2486 * uplink_seid, since they share the same VEB
2488 vsi->uplink_seid = uplink_vsi->uplink_seid;
2489 ctxt.pf_num = hw->pf_id;
2490 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2491 ctxt.uplink_seid = vsi->uplink_seid;
2492 ctxt.connection_type = 0x1;
2493 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2495 /* Configure switch ID */
2496 ctxt.info.valid_sections |=
2497 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2498 ctxt.info.switch_id =
2499 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2500 /* Configure port/vlan */
2501 ctxt.info.valid_sections |=
2502 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2503 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2504 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2505 I40E_DEFAULT_TCMAP);
2506 if (ret != I40E_SUCCESS) {
2507 PMD_DRV_LOG(ERR, "Failed to configure "
2508 "TC queue mapping\n");
2509 goto fail_msix_alloc;
2511 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2512 ctxt.info.valid_sections |=
2513 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2515 * Since the VSI is not created yet, only configure its parameters;
2516 * the vsi will be added below.
2520 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet\n");
2521 goto fail_msix_alloc;
2524 if (vsi->type != I40E_VSI_MAIN) {
2525 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2527 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
2528 hw->aq.asq_last_status);
2529 goto fail_msix_alloc;
2531 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2532 vsi->info.valid_sections = 0;
2533 vsi->seid = ctxt.seid;
2534 vsi->vsi_id = ctxt.vsi_number;
2535 vsi->sib_vsi_list.vsi = vsi;
2536 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2537 &vsi->sib_vsi_list, list);
2540 /* MAC/VLAN configuration */
2541 ret = i40e_vsi_add_mac(vsi, &broadcast);
2542 if (ret != I40E_SUCCESS) {
2543 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter\n");
2544 goto fail_msix_alloc;
2547 /* Get VSI BW information */
2548 i40e_vsi_dump_bw_config(vsi);
2551 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
2553 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
2559 /* Configure vlan stripping on or off */
2561 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2563 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2564 struct i40e_vsi_context ctxt;
2566 int ret = I40E_SUCCESS;
2568 /* Check if it is already on or off */
2569 if (vsi->info.valid_sections &
2570 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2572 if ((vsi->info.port_vlan_flags &
2573 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2574 return 0; /* already on */
2576 if ((vsi->info.port_vlan_flags &
2577 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2578 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2579 return 0; /* already off */
2584 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2586 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2587 vsi->info.valid_sections =
2588 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2589 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2590 vsi->info.port_vlan_flags |= vlan_flags;
2591 ctxt.seid = vsi->seid;
2592 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2593 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2595 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
2596 on ? "enable" : "disable");
2602 i40e_vsi_init_vlan(struct i40e_vsi *vsi)
2604 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2605 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
2606 struct i40e_vsi_context ctxt;
2607 uint8_t vlan_flags = 0;
2611 if (data->dev_conf.txmode.hw_vlan_insert_pvid == 1) {
2613 * If insert pvid is enabled, only tagged pkts are
2614 * allowed to be sent out.
2616 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2617 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2619 if (data->dev_conf.txmode.hw_vlan_reject_tagged == 0)
2620 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2621 if (data->dev_conf.txmode.hw_vlan_reject_untagged == 0)
2622 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2625 /* Strip VLAN tag or not */
2626 if (data->dev_conf.rxmode.hw_vlan_strip == 0)
2627 vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2629 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_MODE_MASK |
2630 I40E_AQ_VSI_PVLAN_INSERT_PVID | I40E_AQ_VSI_PVLAN_EMOD_MASK);
2631 vsi->info.port_vlan_flags |= vlan_flags;
2632 vsi->info.pvid = data->dev_conf.txmode.pvid;
2633 vsi->info.valid_sections =
2634 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2636 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2637 ctxt.seid = vsi->seid;
2638 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2639 if (ret != I40E_SUCCESS)
2640 PMD_DRV_LOG(INFO, "Failed to update VSI params\n");
2646 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2648 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2650 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2654 i40e_update_flow_control(struct i40e_hw *hw)
2656 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2657 struct i40e_link_status link_status;
2658 uint32_t rxfc = 0, txfc = 0, reg;
2662 memset(&link_status, 0, sizeof(link_status));
2663 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
2664 if (ret != I40E_SUCCESS) {
2665 PMD_DRV_LOG(ERR, "Failed to get link status information\n");
2666 goto write_reg; /* Disable flow control */
2669 an_info = hw->phy.link_info.an_info;
2670 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
2671 PMD_DRV_LOG(INFO, "Link auto negotiation not completed\n");
2672 ret = I40E_ERR_NOT_READY;
2673 goto write_reg; /* Disable flow control */
2676 * If link auto negotiation is enabled, flow control needs to
2677 * be configured according to it
2679 switch (an_info & I40E_LINK_PAUSE_RXTX) {
2680 case I40E_LINK_PAUSE_RXTX:
2683 hw->fc.current_mode = I40E_FC_FULL;
2685 case I40E_AQ_LINK_PAUSE_RX:
2687 hw->fc.current_mode = I40E_FC_RX_PAUSE;
2689 case I40E_AQ_LINK_PAUSE_TX:
2691 hw->fc.current_mode = I40E_FC_TX_PAUSE;
2694 hw->fc.current_mode = I40E_FC_NONE;
2699 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
2700 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
2701 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
2702 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
2703 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
2704 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
2711 i40e_pf_setup(struct i40e_pf *pf)
2713 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2714 struct i40e_filter_control_settings settings;
2715 struct rte_eth_dev_data *dev_data = pf->dev_data;
2716 struct i40e_vsi *vsi;
2719 /* Clear all stats counters */
2720 pf->offset_loaded = FALSE;
2721 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
2722 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
2724 ret = i40e_pf_get_switch_config(pf);
2725 if (ret != I40E_SUCCESS) {
2726 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
2731 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
2733 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
2734 return I40E_ERR_NOT_READY;
2737 dev_data->nb_rx_queues = vsi->nb_qps;
2738 dev_data->nb_tx_queues = vsi->nb_qps;
2740 /* Configure filter control */
2741 memset(&settings, 0, sizeof(settings));
2742 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
2743 /* Enable ethtype and macvlan filters */
2744 settings.enable_ethtype = TRUE;
2745 settings.enable_macvlan = TRUE;
2746 ret = i40e_set_filter_control(hw, &settings);
2748 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
2751 /* Update flow control according to the auto negotiation */
2752 i40e_update_flow_control(hw);
2754 return I40E_SUCCESS;
2758 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2763 /* Wait until the request is finished */
2764 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2765 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2766 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2767 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2768 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
2774 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2775 return I40E_SUCCESS; /* already on, skip next steps */
2776 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2778 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2779 return I40E_SUCCESS; /* already off, skip next steps */
2780 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2782 /* Write the register */
2783 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
2784 /* Check the result */
2785 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2786 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2787 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
2789 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2790 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
2793 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
2794 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
2798 /* Check if it timed out */
2799 if (j >= I40E_CHK_Q_ENA_COUNT) {
2800 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
2801 (on ? "enable" : "disable"), q_idx);
2802 return I40E_ERR_TIMEOUT;
2804 return I40E_SUCCESS;
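/*
 * Note on the handshake above: software writes QENA_REQ to request a
 * queue state change and hardware updates QENA_STAT once the change
 * completes. The first poll loop therefore waits until the two bits
 * agree (no change pending) before modifying REQ, and the second loop
 * waits until STAT matches the newly requested state.
 */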
2806 /* Switch on or off the tx queues */
2808 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
2810 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2811 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2812 struct i40e_tx_queue *txq;
2816 pf_q = vsi->base_queue;
2817 for (i = 0; i < dev_data->nb_tx_queues; i++, pf_q++) {
2818 txq = dev_data->tx_queues[i];
2820 continue; /* Queue not configured */
2821 ret = i40e_switch_tx_queue(hw, pf_q, on);
2822 if (ret != I40E_SUCCESS)
2826 return I40E_SUCCESS;
2830 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
2835 /* Wait until the request is finished */
2836 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2837 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2838 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2839 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
2840 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
2845 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2846 return I40E_SUCCESS; /* Already on, skip next steps */
2847 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2849 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2850 return I40E_SUCCESS; /* Already off, skip next steps */
2851 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2854 /* Write the register */
2855 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
2856 /* Check the result */
2857 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
2858 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
2859 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
2861 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2862 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
2865 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
2866 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
2871 /* Check if it timed out */
2872 if (j >= I40E_CHK_Q_ENA_COUNT) {
2873 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
2874 (on ? "enable" : "disable"), q_idx);
2875 return I40E_ERR_TIMEOUT;
2878 return I40E_SUCCESS;
2880 /* Switch on or off the rx queues */
2882 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
2884 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
2885 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2886 struct i40e_rx_queue *rxq;
2890 pf_q = vsi->base_queue;
2891 for (i = 0; i < dev_data->nb_rx_queues; i++, pf_q++) {
2892 rxq = dev_data->rx_queues[i];
2894 continue; /* Queue not configured */
2895 ret = i40e_switch_rx_queue(hw, pf_q, on);
2896 if (ret != I40E_SUCCESS)
2900 return I40E_SUCCESS;
2903 /* Switch on or off all the rx/tx queues */
2905 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
2910 /* enable rx queues before enabling tx queues */
2911 ret = i40e_vsi_switch_rx_queues(vsi, on);
2913 PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
2916 ret = i40e_vsi_switch_tx_queues(vsi, on);
2918 /* Stop tx queues before stopping rx queues */
2919 ret = i40e_vsi_switch_tx_queues(vsi, on);
2921 PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
2924 ret = i40e_vsi_switch_rx_queues(vsi, on);
2930 /* Initialize VSI for TX */
2932 i40e_vsi_tx_init(struct i40e_vsi *vsi)
2934 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2935 struct rte_eth_dev_data *data = pf->dev_data;
2937 uint32_t ret = I40E_SUCCESS;
2939 for (i = 0; i < data->nb_tx_queues; i++) {
2940 ret = i40e_tx_queue_init(data->tx_queues[i]);
2941 if (ret != I40E_SUCCESS)
2948 /* Initialize VSI for RX */
2950 i40e_vsi_rx_init(struct i40e_vsi *vsi)
2952 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2953 struct rte_eth_dev_data *data = pf->dev_data;
2954 int ret = I40E_SUCCESS;
2957 i40e_pf_config_mq_rx(pf);
2958 for (i = 0; i < data->nb_rx_queues; i++) {
2959 ret = i40e_rx_queue_init(data->rx_queues[i]);
2960 if (ret != I40E_SUCCESS) {
2961 PMD_DRV_LOG(ERR, "Failed to do RX queue "
2962 "initialization\n");
2970 /* Initialize VSI */
2972 i40e_vsi_init(struct i40e_vsi *vsi)
2976 err = i40e_vsi_tx_init(vsi);
2978 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization\n");
2981 err = i40e_vsi_rx_init(vsi);
2983 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization\n");
2991 i40e_stat_update_32(struct i40e_hw *hw,
2999 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3003 if (new_data >= *offset)
3004 *stat = (uint64_t)(new_data - *offset);
3006 *stat = (uint64_t)((new_data +
3007 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3011 i40e_stat_update_48(struct i40e_hw *hw,
3020 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3021 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3022 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3027 if (new_data >= *offset)
3028 *stat = new_data - *offset;
3030 *stat = (uint64_t)((new_data +
3031 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3033 *stat &= I40E_48_BIT_MASK;
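/*
 * Worked example of the 48-bit wrap handling above: if *offset is
 * 0xFFFFFFFFFFF0 and the counter has wrapped around to new_data = 0x10,
 * then new_data < *offset and *stat = (0x10 + (1ULL << 48)) -
 * 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the offset was taken.
 */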
3038 i40e_pf_disable_irq0(struct i40e_hw *hw)
3040 /* Disable all interrupt types */
3041 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3042 I40E_WRITE_FLUSH(hw);
3047 i40e_pf_enable_irq0(struct i40e_hw *hw)
3049 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3050 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3051 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3052 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3053 I40E_WRITE_FLUSH(hw);
3057 i40e_pf_config_irq0(struct i40e_hw *hw)
3061 /* read pending request and disable first */
3062 i40e_pf_disable_irq0(hw);
3064 * Enable all error interrupt options to detect possible errors;
3065 * other informative interrupts are ignored
3067 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3068 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3069 I40E_PFINT_ICR0_ENA_GRST_MASK |
3070 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3071 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3072 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3073 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3074 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3076 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3077 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3078 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3080 /* Link no queues with irq0 */
3081 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3082 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3086 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3088 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3089 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3092 uint32_t index, offset, val;
3097 * Try to find which VF triggered a reset. Use the absolute VF id to
3098 * access it, since the reg is a global register.
3100 for (i = 0; i < pf->vf_num; i++) {
3101 abs_vf_id = hw->func_caps.vf_base_id + i;
3102 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3103 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3104 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3105 /* VFR event occurred */
3106 if (val & (0x1 << offset)) {
3109 /* Clear the event first */
3110 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3112 PMD_DRV_LOG(INFO, "VF %u reset occured\n", abs_vf_id);
3114 * Only notify a VF reset event occured,
3115 * don't trigger another SW reset
3117 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3118 if (ret != I40E_SUCCESS)
3119 PMD_DRV_LOG(ERR, "Failed to do VF reset\n");
3125 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3127 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3128 struct i40e_arq_event_info info;
3129 uint16_t pending, opcode;
3132 info.msg_size = I40E_AQ_BUF_SZ;
3133 info.msg_buf = rte_zmalloc("msg_buffer", I40E_AQ_BUF_SZ, 0);
3134 if (!info.msg_buf) {
3135 PMD_DRV_LOG(ERR, "Failed to allocate mem\n");
3141 ret = i40e_clean_arq_element(hw, &info, &pending);
3143 if (ret != I40E_SUCCESS) {
3144 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3145 "aq_err: %u\n", hw->aq.asq_last_status);
3148 opcode = rte_le_to_cpu_16(info.desc.opcode);
3151 case i40e_aqc_opc_send_msg_to_pf:
3152 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3153 i40e_pf_host_handle_vf_msg(dev,
3154 rte_le_to_cpu_16(info.desc.retval),
3155 rte_le_to_cpu_32(info.desc.cookie_high),
3156 rte_le_to_cpu_32(info.desc.cookie_low),
3161 PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
3165 /* Reset the buffer after processing one */
3166 info.msg_size = I40E_AQ_BUF_SZ;
3168 rte_free(info.msg_buf);
3172 * Interrupt handler triggered by NIC for handling
3173 * specific interrupt.
3176 * Pointer to interrupt handle.
3178 * The address of parameter (struct rte_eth_dev *) registered before.
3184 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3187 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3188 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3189 uint32_t cause, enable;
3191 i40e_pf_disable_irq0(hw);
3193 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3194 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3196 /* Shared IRQ case, return */
3197 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3198 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3199 "no INT event to process\n", hw->pf_id);
3203 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3204 PMD_DRV_LOG(INFO, "INT:Link status changed\n");
3205 i40e_dev_link_update(dev, 0);
3208 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3209 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error\n");
3211 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3212 PMD_DRV_LOG(INFO, "INT:Malicious programming detected\n");
3214 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3215 PMD_DRV_LOG(INFO, "INT:Global Resets Requested\n");
3217 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3218 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured\n");
3220 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3221 PMD_DRV_LOG(INFO, "INT:HMC error occured\n");
3223 /* Add processing func to deal with VF reset event */
3224 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3225 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3226 i40e_dev_handle_vfr_event(dev);
3228 /* Find admin queue event */
3229 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3230 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3231 i40e_dev_handle_aq_msg(dev);
3235 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3236 /* Re-enable interrupt from device side */
3237 i40e_pf_enable_irq0(hw);
3238 /* Re-enable interrupt from host side */
3239 rte_intr_enable(&(dev->pci_dev->intr_handle));
3243 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3244 struct i40e_macvlan_filter *filter,
3247 int ele_num, ele_buff_size;
3248 int num, actual_num, i;
3249 int ret = I40E_SUCCESS;
3250 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3251 struct i40e_aqc_add_macvlan_element_data *req_list;
3253 if (filter == NULL || total == 0)
3254 return I40E_ERR_PARAM;
3255 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3256 ele_buff_size = hw->aq.asq_buf_size;
3258 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3259 if (req_list == NULL) {
3260 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3261 return I40E_ERR_NO_MEMORY;
3266 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3267 memset(req_list, 0, ele_buff_size);
3269 for (i = 0; i < actual_num; i++) {
3270 (void)rte_memcpy(req_list[i].mac_addr,
3271 &filter[num + i].macaddr, ETH_ADDR_LEN);
3272 req_list[i].vlan_tag =
3273 rte_cpu_to_le_16(filter[num + i].vlan_id);
3274 req_list[i].flags = rte_cpu_to_le_16(\
3275 I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
3276 req_list[i].queue_number = 0;
3279 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3281 if (ret != I40E_SUCCESS) {
3282 PMD_DRV_LOG(ERR, "Failed to add macvlan filter\n");
3286 } while (num < total);
3294 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3295 struct i40e_macvlan_filter *filter,
3298 int ele_num, ele_buff_size;
3299 int num, actual_num, i;
3300 int ret = I40E_SUCCESS;
3301 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3302 struct i40e_aqc_remove_macvlan_element_data *req_list;
3304 if (filter == NULL || total == 0)
3305 return I40E_ERR_PARAM;
3307 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3308 ele_buff_size = hw->aq.asq_buf_size;
3310 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3311 if (req_list == NULL) {
3312 PMD_DRV_LOG(ERR, "Fail to allocate memory\n");
3313 return I40E_ERR_NO_MEMORY;
3318 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3319 memset(req_list, 0, ele_buff_size);
3321 for (i = 0; i < actual_num; i++) {
3322 (void)rte_memcpy(req_list[i].mac_addr,
3323 &filter[num + i].macaddr, ETH_ADDR_LEN);
3324 req_list[i].vlan_tag =
3325 rte_cpu_to_le_16(filter[num + i].vlan_id);
3326 req_list[i].flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3329 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3331 if (ret != I40E_SUCCESS) {
3332 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter\n");
3336 } while (num < total);
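/*
 * Worked example of the batching above (buffer size illustrative): the
 * admin queue buffer limits each command to ele_num elements, so
 * removing total = 10 filters with ele_num = 4 issues three commands
 * with actual_num = 4, 4 and 2 respectively.
 */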
3343 /* Find a specific MAC filter */
3344 static struct i40e_mac_filter *
3345 i40e_find_mac_filter(struct i40e_vsi *vsi,
3346 struct ether_addr *macaddr)
3348 struct i40e_mac_filter *f;
3350 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3351 if (is_same_ether_addr(macaddr, &(f->macaddr)))
3359 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3362 uint32_t vid_idx, vid_bit;
3364 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3365 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3367 if (vsi->vfta[vid_idx] & vid_bit)
3374 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3375 uint16_t vlan_id, bool on)
3377 uint32_t vid_idx, vid_bit;
3379 #define UINT32_BIT_MASK 0x1F
3380 #define VALID_VLAN_BIT_MASK 0xFFF
3381 /* VFTA is an array of 32-bit elements, each holding 32 vlan bits. Find the
3382 * element first, then the bit within it
3384 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3386 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3389 vsi->vfta[vid_idx] |= vid_bit;
3391 vsi->vfta[vid_idx] &= ~vid_bit;
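/*
 * Worked example for the VFTA addressing above: vlan_id = 100 gives
 * vid_idx = 100 >> 5 = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4, so
 * the filter state for VLAN 100 lives in bit 4 of vsi->vfta[3].
 */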
3395 * Find all vlan options for a specific mac addr,
3396 * filling in one macvlan entry per vlan found.
3399 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3400 struct i40e_macvlan_filter *mv_f,
3401 int num, struct ether_addr *addr)
3407 * i40e_find_vlan_filter is deliberately not used here, to decrease
3408 * the loop time, although the code looks more complex.
3410 if (num < vsi->vlan_num)
3411 return I40E_ERR_PARAM;
3414 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3416 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3417 if (vsi->vfta[j] & (1 << k)) {
3419 PMD_DRV_LOG(ERR, "vlan number "
3421 return I40E_ERR_PARAM;
3423 (void)rte_memcpy(&mv_f[i].macaddr,
3424 addr, ETH_ADDR_LEN);
3426 j * I40E_UINT32_BIT_SIZE + k;
3432 return I40E_SUCCESS;
3436 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3437 struct i40e_macvlan_filter *mv_f,
3442 struct i40e_mac_filter *f;
3444 if (num < vsi->mac_num)
3445 return I40E_ERR_PARAM;
3447 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3449 PMD_DRV_LOG(ERR, "buffer number not match\n");
3450 return I40E_ERR_PARAM;
3452 (void)rte_memcpy(&mv_f[i].macaddr, &f->macaddr, ETH_ADDR_LEN);
3453 mv_f[i].vlan_id = vlan;
3457 return I40E_SUCCESS;
3461 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3464 struct i40e_mac_filter *f;
3465 struct i40e_macvlan_filter *mv_f;
3466 int ret = I40E_SUCCESS;
3468 if (vsi == NULL || vsi->mac_num == 0)
3469 return I40E_ERR_PARAM;
3471 /* Case that no vlan is set */
3472 if (vsi->vlan_num == 0)
3475 num = vsi->mac_num * vsi->vlan_num;
3477 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3479 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3480 return I40E_ERR_NO_MEMORY;
3484 if (vsi->vlan_num == 0) {
3485 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3486 (void)rte_memcpy(&mv_f[i].macaddr,
3487 &f->macaddr, ETH_ADDR_LEN);
3488 mv_f[i].vlan_id = 0;
3492 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3493 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
3494 vsi->vlan_num, &f->macaddr);
3495 if (ret != I40E_SUCCESS)
3501 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3509 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3511 struct i40e_macvlan_filter *mv_f;
3513 int ret = I40E_SUCCESS;
3515 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3516 return I40E_ERR_PARAM;
3518 /* If it's already set, just return */
3519 if (i40e_find_vlan_filter(vsi, vlan))
3520 return I40E_SUCCESS;
3522 mac_num = vsi->mac_num;
3525 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3526 return I40E_ERR_PARAM;
3529 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3532 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3533 return I40E_ERR_NO_MEMORY;
3536 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3538 if (ret != I40E_SUCCESS)
3541 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3543 if (ret != I40E_SUCCESS)
3546 i40e_set_vlan_filter(vsi, vlan, 1);
3556 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3558 struct i40e_macvlan_filter *mv_f;
3560 int ret = I40E_SUCCESS;
3563 * Vlan 0 is the generic filter for untagged packets
3564 * and can't be removed.
3566 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3567 return I40E_ERR_PARAM;
3569 /* If we can't find it, just return */
3570 if (!i40e_find_vlan_filter(vsi, vlan))
3571 return I40E_ERR_PARAM;
3573 mac_num = vsi->mac_num;
3576 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr\n");
3577 return I40E_ERR_PARAM;
3580 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3583 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3584 return I40E_ERR_NO_MEMORY;
3587 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3589 if (ret != I40E_SUCCESS)
3592 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
3594 if (ret != I40E_SUCCESS)
3597 /* This is the last vlan to remove; replace all mac filters with vlan 0 */
3598 if (vsi->vlan_num == 1) {
3599 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
3600 if (ret != I40E_SUCCESS)
3603 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3604 if (ret != I40E_SUCCESS)
3608 i40e_set_vlan_filter(vsi, vlan, 0);
3618 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3620 struct i40e_mac_filter *f;
3621 struct i40e_macvlan_filter *mv_f;
3623 int ret = I40E_SUCCESS;
3625 /* If it's already added and configured, return */
3626 f = i40e_find_mac_filter(vsi, addr);
3628 return I40E_SUCCESS;
3631 * If vlan_num is 0, this is the first time a mac is added;
3632 * set the mask for vlan_id 0.
3634 if (vsi->vlan_num == 0) {
3635 i40e_set_vlan_filter(vsi, 0, 1);
3639 vlan_num = vsi->vlan_num;
3641 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3643 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3644 return I40E_ERR_NO_MEMORY;
3647 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3648 if (ret != I40E_SUCCESS)
3651 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
3652 if (ret != I40E_SUCCESS)
3655 /* Add the mac addr to the mac list */
3656 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
3658 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3659 ret = I40E_ERR_NO_MEMORY;
3662 (void)rte_memcpy(&f->macaddr, addr, ETH_ADDR_LEN);
3663 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
3674 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
3676 struct i40e_mac_filter *f;
3677 struct i40e_macvlan_filter *mv_f;
3679 int ret = I40E_SUCCESS;
3681 /* If we can't find it, return an error */
3682 f = i40e_find_mac_filter(vsi, addr);
3684 return I40E_ERR_PARAM;
3686 vlan_num = vsi->vlan_num;
3687 if (vlan_num == 0) {
3688 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
3689 return I40E_ERR_PARAM;
3691 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
3693 PMD_DRV_LOG(ERR, "failed to allocate memory\n");
3694 return I40E_ERR_NO_MEMORY;
3697 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
3698 if (ret != I40E_SUCCESS)
3701 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
3702 if (ret != I40E_SUCCESS)
3705 /* Remove the mac addr from the mac list */
3706 TAILQ_REMOVE(&vsi->mac_list, f, next);
3716 /* Configure hash enable flags for RSS */
3718 i40e_config_hena(uint64_t flags)
3725 if (flags & ETH_RSS_NONF_IPV4_UDP)
3726 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
3727 if (flags & ETH_RSS_NONF_IPV4_TCP)
3728 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
3729 if (flags & ETH_RSS_NONF_IPV4_SCTP)
3730 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3731 if (flags & ETH_RSS_NONF_IPV4_OTHER)
3732 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
3733 if (flags & ETH_RSS_FRAG_IPV4)
3734 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
3735 if (flags & ETH_RSS_NONF_IPV6_UDP)
3736 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
3737 if (flags & ETH_RSS_NONF_IPV6_TCP)
3738 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
3739 if (flags & ETH_RSS_NONF_IPV6_SCTP)
3740 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3741 if (flags & ETH_RSS_NONF_IPV6_OTHER)
3742 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
3743 if (flags & ETH_RSS_FRAG_IPV6)
3744 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
3745 if (flags & ETH_RSS_L2_PAYLOAD)
3746 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
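/*
 * i40e_config_hena() above and i40e_parse_hena() below are inverses
 * over the supported flag set: for any flags within
 * I40E_RSS_OFFLOAD_ALL, i40e_parse_hena(i40e_config_hena(flags)) ==
 * flags, since each ETH_RSS_* flag maps to exactly one PCTYPE bit and
 * back.
 */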
3751 /* Parse the hash enable flags */
3753 i40e_parse_hena(uint64_t flags)
3755 uint64_t rss_hf = 0;
3760 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
3761 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
3762 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
3763 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
3764 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
3765 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
3766 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
3767 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
3768 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
3769 rss_hf |= ETH_RSS_FRAG_IPV4;
3770 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
3771 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
3772 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
3773 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
3774 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
3775 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
3776 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
3777 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
3778 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
3779 rss_hf |= ETH_RSS_FRAG_IPV6;
3780 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
3781 rss_hf |= ETH_RSS_L2_PAYLOAD;
3788 i40e_pf_disable_rss(struct i40e_pf *pf)
3790 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3793 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3794 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3795 hena &= ~I40E_RSS_HENA_ALL;
3796 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3797 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3798 I40E_WRITE_FLUSH(hw);
3802 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
3805 uint8_t hash_key_len;
3810 hash_key = (uint32_t *)(rss_conf->rss_key);
3811 hash_key_len = rss_conf->rss_key_len;
3812 if (hash_key != NULL && hash_key_len >=
3813 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3814 /* Fill in RSS hash key */
3815 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3816 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
3819 rss_hf = rss_conf->rss_hf;
3820 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3821 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3822 hena &= ~I40E_RSS_HENA_ALL;
3823 hena |= i40e_config_hena(rss_hf);
3824 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
3825 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
3826 I40E_WRITE_FLUSH(hw);
3832 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
3833 struct rte_eth_rss_conf *rss_conf)
3835 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3836 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
3839 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3840 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3841 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
3842 if (rss_hf != 0) /* Enable RSS */
3844 return 0; /* Nothing to do */
3847 if (rss_hf == 0) /* Disable RSS */
3850 return i40e_hw_rss_hash_set(hw, rss_conf);
3854 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
3855 struct rte_eth_rss_conf *rss_conf)
3857 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3858 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
3862 if (hash_key != NULL) {
3863 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3864 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
3865 rss_conf->rss_key_len = i * sizeof(uint32_t);
3867 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
3868 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
3869 rss_conf->rss_hf = i40e_parse_hena(hena);
3876 i40e_pf_config_rss(struct i40e_pf *pf)
3878 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3879 struct rte_eth_rss_conf rss_conf;
3880 uint32_t i, lut = 0;
3881 uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);
3883 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
3886 lut = (lut << 8) | (j & ((0x1 <<
3887 hw->func_caps.rss_table_entry_width) - 1));
3889 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
3892 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
3893 if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
3894 i40e_pf_disable_rss(pf);
3897 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
3898 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
3899 /* Generate a random default hash key */
3900 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
3901 rss_key_default[i] = (uint32_t)rte_rand();
3902 rss_conf.rss_key = (uint8_t *)rss_key_default;
3903 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3907 return i40e_hw_rss_hash_set(hw, &rss_conf);
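/*
 * Worked example of the LUT packing above: each 32-bit PFQF_HLUT
 * register holds four 8-bit entries, accumulated via lut = (lut << 8) |
 * j and written to I40E_PFQF_HLUT(i >> 2), with j wrapping around at
 * num. With num = 4 rx queues the table simply cycles the queue
 * indexes 0, 1, 2, 3 across the whole rss_table_size.
 */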
3911 i40e_pf_config_mq_rx(struct i40e_pf *pf)
3913 if (!pf->dev_data->sriov.active) {
3914 switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
3916 i40e_pf_config_rss(pf);
3919 i40e_pf_disable_rss(pf);
3928 i40e_disable_queue(struct i40e_hw *hw, uint16_t q_idx)
3933 /* Disable TX queue */
3934 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3935 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3936 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3937 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 0x1)))
3939 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3941 if (i >= I40E_CHK_Q_ENA_COUNT) {
3942 PMD_DRV_LOG(ERR, "Failed to disable "
3943 "tx queue[%u]\n", q_idx);
3944 return I40E_ERR_TIMEOUT;
3947 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
3948 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3949 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3950 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3951 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3952 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3953 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3954 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3957 if (i >= I40E_CHK_Q_ENA_COUNT) {
3958 PMD_DRV_LOG(ERR, "Failed to disable "
3959 "tx queue[%u]\n", q_idx);
3960 return I40E_ERR_TIMEOUT;
3964 /* Disable RX queue */
3965 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3966 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3967 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3968 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3970 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3972 if (i >= I40E_CHK_Q_ENA_COUNT) {
3973 PMD_DRV_LOG(ERR, "Failed to disable "
3974 "rx queue[%u]\n", q_idx);
3975 return I40E_ERR_TIMEOUT;
3978 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
3979 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3980 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3981 for (i = 0; i < I40E_CHK_Q_ENA_COUNT; i++) {
3982 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3983 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3984 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3985 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3988 if (i >= I40E_CHK_Q_ENA_COUNT) {
3989 PMD_DRV_LOG(ERR, "Failed to disable "
3990 "rx queue[%u]\n", q_idx);
3991 return I40E_ERR_TIMEOUT;
3995 return I40E_SUCCESS;
3999 i40e_pf_disable_all_queues(struct i40e_hw *hw)
4002 uint16_t firstq, lastq, maxq, i;
4004 reg = I40E_READ_REG(hw, I40E_PFLAN_QALLOC);
4005 if (!(reg & I40E_PFLAN_QALLOC_VALID_MASK)) {
4006 PMD_DRV_LOG(INFO, "PF queue allocation is invalid\n");
4007 return I40E_ERR_PARAM;
4009 firstq = reg & I40E_PFLAN_QALLOC_FIRSTQ_MASK;
4010 lastq = (reg & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4011 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4012 maxq = lastq - firstq;
4013 for (i = 0; i <= maxq; i++) {
4014 ret = i40e_disable_queue(hw, i);
4015 if (ret != I40E_SUCCESS)
4018 return I40E_SUCCESS;
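/*
 * Worked example of the QALLOC decode above (register value
 * illustrative): if PFLAN_QALLOC reports firstq = 0 and lastq = 127,
 * then maxq = 127 and the loop above disables queues 0 through 127,
 * one TX/RX pair per iteration.
 */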