4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
51 #include <rte_eth_ctrl.h>
53 #include "i40e_logs.h"
54 #include "i40e/i40e_prototype.h"
55 #include "i40e/i40e_adminq_cmd.h"
56 #include "i40e/i40e_type.h"
57 #include "i40e_ethdev.h"
58 #include "i40e_rxtx.h"
61 #define I40E_DEFAULT_RX_FREE_THRESH 32
62 #define I40E_DEFAULT_RX_PTHRESH 8
63 #define I40E_DEFAULT_RX_HTHRESH 8
64 #define I40E_DEFAULT_RX_WTHRESH 0
66 #define I40E_DEFAULT_TX_FREE_THRESH 32
67 #define I40E_DEFAULT_TX_PTHRESH 32
68 #define I40E_DEFAULT_TX_HTHRESH 0
69 #define I40E_DEFAULT_TX_WTHRESH 0
70 #define I40E_DEFAULT_TX_RSBIT_THRESH 32
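/*
 * These RX/TX threshold defaults are what i40e_dev_info_get() reports back
 * to applications in default_rxconf/default_txconf; individual queues may
 * still override them at rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup()
 * time.
 */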
72 /* Maximum number of MAC addresses */
73 #define I40E_NUM_MACADDR_MAX 64
74 #define I40E_CLEAR_PXE_WAIT_MS 200
76 /* Maximum number of capability elements */
77 #define I40E_MAX_CAP_ELE_NUM 128
79 /* Wait count and interval */
80 #define I40E_CHK_Q_ENA_COUNT 1000
81 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
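/*
 * Queue enable/disable status is polled up to I40E_CHK_Q_ENA_COUNT times
 * with I40E_CHK_Q_ENA_INTERVAL_US between reads, i.e. roughly
 * 1000 * 1000 us = 1 second of total wait before the switch is treated as
 * failed.
 */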
83 /* Maximum number of VSIs */
84 #define I40E_MAX_NUM_VSIS (384UL)
86 /* Bit shift and mask */
87 #define I40E_16_BIT_SHIFT 16
88 #define I40E_16_BIT_MASK 0xFFFF
89 #define I40E_32_BIT_SHIFT 32
90 #define I40E_32_BIT_MASK 0xFFFFFFFF
91 #define I40E_48_BIT_SHIFT 48
92 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
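/*
 * The shift/mask pairs above are used when reassembling the split 48-bit
 * statistics counters, which are exposed as a 32-bit low register plus a
 * 16-bit high register. Roughly:
 *   value = ((uint64_t)(hi & I40E_16_BIT_MASK) << I40E_32_BIT_SHIFT) | lo;
 * with rollover handled modulo 2^48 via I40E_48_BIT_MASK
 * (see i40e_stat_update_48()).
 */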
94 /* Default queue interrupt throttling time in microseconds */
95 #define I40E_ITR_INDEX_DEFAULT 0
96 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
97 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
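/*
 * ITR intervals are programmed in 2 us units, so the default of 32 us is
 * written to the register as 32 / 2 = 16; i40e_calc_itr_interval() performs
 * the conversion and clamps out-of-range values to the default.
 */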
99 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
101 static int eth_i40e_dev_init(\
102 __attribute__((unused)) struct eth_driver *eth_drv,
103 struct rte_eth_dev *eth_dev);
104 static int i40e_dev_configure(struct rte_eth_dev *dev);
105 static int i40e_dev_start(struct rte_eth_dev *dev);
106 static void i40e_dev_stop(struct rte_eth_dev *dev);
107 static void i40e_dev_close(struct rte_eth_dev *dev);
108 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
109 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
110 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
111 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
112 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
113 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
114 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
115 struct rte_eth_stats *stats);
116 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
117 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
121 static void i40e_dev_info_get(struct rte_eth_dev *dev,
122 struct rte_eth_dev_info *dev_info);
123 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
126 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
127 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
128 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
131 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
132 static int i40e_dev_led_on(struct rte_eth_dev *dev);
133 static int i40e_dev_led_off(struct rte_eth_dev *dev);
134 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
135 struct rte_eth_fc_conf *fc_conf);
136 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
137 struct rte_eth_pfc_conf *pfc_conf);
138 static void i40e_macaddr_add(struct rte_eth_dev *dev,
139 struct ether_addr *mac_addr,
142 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
143 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
144 struct rte_eth_rss_reta *reta_conf);
145 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
146 struct rte_eth_rss_reta *reta_conf);
148 static int i40e_get_cap(struct i40e_hw *hw);
149 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
150 static int i40e_pf_setup(struct i40e_pf *pf);
151 static int i40e_vsi_init(struct i40e_vsi *vsi);
152 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
153 bool offset_loaded, uint64_t *offset, uint64_t *stat);
154 static void i40e_stat_update_48(struct i40e_hw *hw,
160 static void i40e_pf_config_irq0(struct i40e_hw *hw);
161 static void i40e_dev_interrupt_handler(
162 __rte_unused struct rte_intr_handle *handle, void *param);
163 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
164 uint32_t base, uint32_t num);
165 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
166 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
168 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
170 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
171 static int i40e_veb_release(struct i40e_veb *veb);
172 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
173 struct i40e_vsi *vsi);
174 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
175 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
176 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
177 struct i40e_macvlan_filter *mv_f,
179 struct ether_addr *addr);
180 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
181 struct i40e_macvlan_filter *mv_f,
184 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
185 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
186 struct rte_eth_rss_conf *rss_conf);
187 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
188 struct rte_eth_rss_conf *rss_conf);
189 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
190 struct rte_eth_udp_tunnel *udp_tunnel);
191 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
192 struct rte_eth_udp_tunnel *udp_tunnel);
193 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
194 enum rte_filter_type filter_type,
195 enum rte_filter_op filter_op,
198 /* Default hash key buffer for RSS */
199 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
201 static struct rte_pci_id pci_id_i40e_map[] = {
202 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
203 #include "rte_pci_dev_ids.h"
204 { .vendor_id = 0, /* sentinel */ },
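/*
 * The device ID table above is generated by an X-macro: rte_pci_dev_ids.h
 * expands RTE_PCI_DEV_ID_DECL_I40E() once for every supported i40e device
 * ID, and the zeroed sentinel entry terminates the list.
 */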
207 static struct eth_dev_ops i40e_eth_dev_ops = {
208 .dev_configure = i40e_dev_configure,
209 .dev_start = i40e_dev_start,
210 .dev_stop = i40e_dev_stop,
211 .dev_close = i40e_dev_close,
212 .promiscuous_enable = i40e_dev_promiscuous_enable,
213 .promiscuous_disable = i40e_dev_promiscuous_disable,
214 .allmulticast_enable = i40e_dev_allmulticast_enable,
215 .allmulticast_disable = i40e_dev_allmulticast_disable,
216 .dev_set_link_up = i40e_dev_set_link_up,
217 .dev_set_link_down = i40e_dev_set_link_down,
218 .link_update = i40e_dev_link_update,
219 .stats_get = i40e_dev_stats_get,
220 .stats_reset = i40e_dev_stats_reset,
221 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
222 .dev_infos_get = i40e_dev_info_get,
223 .vlan_filter_set = i40e_vlan_filter_set,
224 .vlan_tpid_set = i40e_vlan_tpid_set,
225 .vlan_offload_set = i40e_vlan_offload_set,
226 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
227 .vlan_pvid_set = i40e_vlan_pvid_set,
228 .rx_queue_start = i40e_dev_rx_queue_start,
229 .rx_queue_stop = i40e_dev_rx_queue_stop,
230 .tx_queue_start = i40e_dev_tx_queue_start,
231 .tx_queue_stop = i40e_dev_tx_queue_stop,
232 .rx_queue_setup = i40e_dev_rx_queue_setup,
233 .rx_queue_release = i40e_dev_rx_queue_release,
234 .rx_queue_count = i40e_dev_rx_queue_count,
235 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
236 .tx_queue_setup = i40e_dev_tx_queue_setup,
237 .tx_queue_release = i40e_dev_tx_queue_release,
238 .dev_led_on = i40e_dev_led_on,
239 .dev_led_off = i40e_dev_led_off,
240 .flow_ctrl_set = i40e_flow_ctrl_set,
241 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
242 .mac_addr_add = i40e_macaddr_add,
243 .mac_addr_remove = i40e_macaddr_remove,
244 .reta_update = i40e_dev_rss_reta_update,
245 .reta_query = i40e_dev_rss_reta_query,
246 .rss_hash_update = i40e_dev_rss_hash_update,
247 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
248 .udp_tunnel_add = i40e_dev_udp_tunnel_add,
249 .udp_tunnel_del = i40e_dev_udp_tunnel_del,
250 .filter_ctrl = i40e_dev_filter_ctrl,
253 static struct eth_driver rte_i40e_pmd = {
255 .name = "rte_i40e_pmd",
256 .id_table = pci_id_i40e_map,
257 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
259 .eth_dev_init = eth_i40e_dev_init,
260 .dev_private_size = sizeof(struct i40e_adapter),
264 i40e_prev_power_of_2(int n)
282 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
283 struct rte_eth_link *link)
285 struct rte_eth_link *dst = link;
286 struct rte_eth_link *src = &(dev->data->dev_link);
288 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
289 *(uint64_t *)src) == 0)
296 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
297 struct rte_eth_link *link)
299 struct rte_eth_link *dst = &(dev->data->dev_link);
300 struct rte_eth_link *src = link;
302 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
303 *(uint64_t *)src) == 0)
310 * Driver initialization routine.
311 * Invoked once at EAL init time.
312 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
315 rte_i40e_pmd_init(const char *name __rte_unused,
316 const char *params __rte_unused)
318 PMD_INIT_FUNC_TRACE();
319 rte_eth_driver_register(&rte_i40e_pmd);
324 static struct rte_driver rte_i40e_driver = {
326 .init = rte_i40e_pmd_init,
329 PMD_REGISTER_DRIVER(rte_i40e_driver);
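/*
 * PMD_REGISTER_DRIVER() hooks rte_i40e_driver into the EAL so that
 * rte_i40e_pmd_init() runs during rte_eal_init(); that call registers
 * rte_i40e_pmd with the ethdev layer, after which eth_i40e_dev_init()
 * below is invoked once for every matching PCI device found during probe.
 */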
332 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
333 struct rte_eth_dev *dev)
335 struct rte_pci_device *pci_dev;
336 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
337 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
338 struct i40e_vsi *vsi;
343 PMD_INIT_FUNC_TRACE();
345 dev->dev_ops = &i40e_eth_dev_ops;
346 dev->rx_pkt_burst = i40e_recv_pkts;
347 dev->tx_pkt_burst = i40e_xmit_pkts;
349 /* for secondary processes, we don't initialise any further as primary
350 * has already done this work. Only check we don't need a different
 * RX function */
352 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
353 if (dev->data->scattered_rx)
354 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
357 pci_dev = dev->pci_dev;
358 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
359 pf->adapter->eth_dev = dev;
360 pf->dev_data = dev->data;
362 hw->back = I40E_PF_TO_ADAPTER(pf);
363 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
365 PMD_INIT_LOG(ERR, "Hardware is not available, "
366 "as address is NULL");
370 hw->vendor_id = pci_dev->id.vendor_id;
371 hw->device_id = pci_dev->id.device_id;
372 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
373 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
374 hw->bus.device = pci_dev->addr.devid;
375 hw->bus.func = pci_dev->addr.function;
377 /* Make sure all is clean before doing PF reset */
380 /* Reset here to make sure all is clean for each PF */
381 ret = i40e_pf_reset(hw);
383 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
387 /* Initialize the shared code (base driver) */
388 ret = i40e_init_shared_code(hw);
390 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
394 /* Initialize the parameters for adminq */
395 i40e_init_adminq_parameter(hw);
396 ret = i40e_init_adminq(hw);
397 if (ret != I40E_SUCCESS) {
398 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
401 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
402 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
403 hw->aq.api_maj_ver, hw->aq.api_min_ver,
404 ((hw->nvm.version >> 12) & 0xf),
405 ((hw->nvm.version >> 4) & 0xff),
406 (hw->nvm.version & 0xf), hw->nvm.eetrack);
409 ret = i40e_aq_stop_lldp(hw, true, NULL);
410 if (ret != I40E_SUCCESS) /* Its failure can be ignored */
411 PMD_INIT_LOG(INFO, "Failed to stop lldp");
414 i40e_clear_pxe_mode(hw);
416 /* Get hw capabilities */
417 ret = i40e_get_cap(hw);
418 if (ret != I40E_SUCCESS) {
419 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
420 goto err_get_capabilities;
423 /* Initialize parameters for PF */
424 ret = i40e_pf_parameter_init(dev);
426 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
427 goto err_parameter_init;
430 /* Initialize the queue management */
431 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
433 PMD_INIT_LOG(ERR, "Failed to init queue pool");
434 goto err_qp_pool_init;
436 ret = i40e_res_pool_init(&pf->msix_pool, 1,
437 hw->func_caps.num_msix_vectors - 1);
439 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
440 goto err_msix_pool_init;
443 /* Initialize lan hmc */
444 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
445 hw->func_caps.num_rx_qp, 0, 0);
446 if (ret != I40E_SUCCESS) {
447 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
448 goto err_init_lan_hmc;
451 /* Configure lan hmc */
452 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
453 if (ret != I40E_SUCCESS) {
454 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
455 goto err_configure_lan_hmc;
458 /* Get and check the mac address */
459 i40e_get_mac_addr(hw, hw->mac.addr);
460 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
461 PMD_INIT_LOG(ERR, "mac address is not valid");
463 goto err_get_mac_addr;
465 /* Copy the permanent MAC address */
466 ether_addr_copy((struct ether_addr *) hw->mac.addr,
467 (struct ether_addr *) hw->mac.perm_addr);
469 /* Disable flow control */
470 hw->fc.requested_mode = I40E_FC_NONE;
471 i40e_set_fc(hw, &aq_fail, TRUE);
473 /* PF setup, which includes VSI setup */
474 ret = i40e_pf_setup(pf);
476 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
477 goto err_setup_pf_switch;
482 /* Disable double vlan by default */
483 i40e_vsi_config_double_vlan(vsi, FALSE);
485 if (!vsi->max_macaddrs)
486 len = ETHER_ADDR_LEN;
488 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
490 /* Should be after VSI initialized */
491 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
492 if (!dev->data->mac_addrs) {
493 PMD_INIT_LOG(ERR, "Failed to allocate memory "
494 "for storing MAC address");
495 goto err_get_mac_addr;
497 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
498 &dev->data->mac_addrs[0]);
500 /* initialize pf host driver to setup SRIOV resource if applicable */
501 i40e_pf_host_init(dev);
503 /* register callback func to eal lib */
504 rte_intr_callback_register(&(pci_dev->intr_handle),
505 i40e_dev_interrupt_handler, (void *)dev);
507 /* configure and enable device interrupt */
508 i40e_pf_config_irq0(hw);
509 i40e_pf_enable_irq0(hw);
511 /* enable uio intr after callback register */
512 rte_intr_enable(&(pci_dev->intr_handle));
517 rte_free(pf->main_vsi);
519 err_configure_lan_hmc:
520 (void)i40e_shutdown_lan_hmc(hw);
522 i40e_res_pool_destroy(&pf->msix_pool);
524 i40e_res_pool_destroy(&pf->qp_pool);
527 err_get_capabilities:
528 (void)i40e_shutdown_adminq(hw);
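/*
 * The error labels above unwind eth_i40e_dev_init() in reverse order of
 * initialization (main VSI, LAN HMC, MSIX/queue pools, admin queue), so a
 * failure at any step only releases what was already acquired.
 */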
534 i40e_dev_configure(struct rte_eth_dev *dev)
536 return i40e_dev_init_vlan(dev);
540 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
542 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
543 uint16_t msix_vect = vsi->msix_intr;
546 for (i = 0; i < vsi->nb_qps; i++) {
547 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
548 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
552 if (vsi->type != I40E_VSI_SRIOV) {
553 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
554 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
558 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
559 vsi->user_param + (msix_vect - 1);
561 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
563 I40E_WRITE_FLUSH(hw);
566 static inline uint16_t
567 i40e_calc_itr_interval(int16_t interval)
569 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
570 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
572 /* Convert to hardware count, as writing each 1 represents 2 us */
577 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
580 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
581 uint16_t msix_vect = vsi->msix_intr;
584 for (i = 0; i < vsi->nb_qps; i++)
585 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
587 /* Bind all RX queues to allocated MSIX interrupt */
588 for (i = 0; i < vsi->nb_qps; i++) {
589 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
590 I40E_QINT_RQCTL_ITR_INDX_MASK |
591 ((vsi->base_queue + i + 1) <<
592 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
593 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
594 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
596 if (i == vsi->nb_qps - 1)
597 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
598 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
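/*
 * The RQCTL writes above chain each RX queue's interrupt cause to the next
 * queue through NEXTQ_INDX, building a hardware linked list; the last queue
 * terminates the list (NEXTQ_INDX set to all ones), and the head of the
 * list is written to the LNKLSTN register below.
 */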
601 /* Write first RX queue to Link list register as the head element */
602 if (vsi->type != I40E_VSI_SRIOV) {
604 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
606 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
608 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
609 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
611 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
612 msix_vect - 1), interval);
614 #ifndef I40E_GLINT_CTL
615 #define I40E_GLINT_CTL 0x0003F800
616 #define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
618 /* Disable auto-masking when enabling any non-zero interrupt */
619 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
620 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
624 /* num_msix_vectors_vf includes IRQ0, which must be subtracted here */
625 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
626 vsi->user_param + (msix_vect - 1);
628 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
629 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
630 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
633 I40E_WRITE_FLUSH(hw);
637 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
639 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
640 uint16_t interval = i40e_calc_itr_interval(\
641 RTE_LIBRTE_I40E_ITR_INTERVAL);
643 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
644 I40E_PFINT_DYN_CTLN_INTENA_MASK |
645 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
646 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
647 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
651 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
653 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
655 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
658 static inline uint8_t
659 i40e_parse_link_speed(uint16_t eth_link_speed)
661 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
663 switch (eth_link_speed) {
664 case ETH_LINK_SPEED_40G:
665 link_speed = I40E_LINK_SPEED_40GB;
667 case ETH_LINK_SPEED_20G:
668 link_speed = I40E_LINK_SPEED_20GB;
670 case ETH_LINK_SPEED_10G:
671 link_speed = I40E_LINK_SPEED_10GB;
673 case ETH_LINK_SPEED_1000:
674 link_speed = I40E_LINK_SPEED_1GB;
676 case ETH_LINK_SPEED_100:
677 link_speed = I40E_LINK_SPEED_100MB;
685 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
687 enum i40e_status_code status;
688 struct i40e_aq_get_phy_abilities_resp phy_ab;
689 struct i40e_aq_set_phy_config phy_conf;
690 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
691 I40E_AQ_PHY_FLAG_PAUSE_RX |
692 I40E_AQ_PHY_FLAG_LOW_POWER;
693 const uint8_t advt = I40E_LINK_SPEED_40GB |
694 I40E_LINK_SPEED_10GB |
695 I40E_LINK_SPEED_1GB |
696 I40E_LINK_SPEED_100MB;
699 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
704 memset(&phy_conf, 0, sizeof(phy_conf));
706 /* bits 0-2 use the values from get_phy_abilities_resp */
708 abilities |= phy_ab.abilities & mask;
710 /* update abilities and speed */
711 if (abilities & I40E_AQ_PHY_AN_ENABLED)
712 phy_conf.link_speed = advt;
714 phy_conf.link_speed = force_speed;
716 phy_conf.abilities = abilities;
718 /* use get_phy_abilities_resp value for the rest */
719 phy_conf.phy_type = phy_ab.phy_type;
720 phy_conf.eee_capability = phy_ab.eee_capability;
721 phy_conf.eeer = phy_ab.eeer_val;
722 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
724 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
725 phy_ab.abilities, phy_ab.link_speed);
726 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
727 phy_conf.abilities, phy_conf.link_speed);
729 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
737 i40e_apply_link_speed(struct rte_eth_dev *dev)
740 uint8_t abilities = 0;
741 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
742 struct rte_eth_conf *conf = &dev->data->dev_conf;
744 speed = i40e_parse_link_speed(conf->link_speed);
745 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
746 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
747 abilities |= I40E_AQ_PHY_AN_ENABLED;
749 abilities |= I40E_AQ_PHY_LINK_ENABLED;
751 return i40e_phy_conf_link(hw, abilities, speed);
755 i40e_dev_start(struct rte_eth_dev *dev)
757 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
758 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
759 struct i40e_vsi *vsi = pf->main_vsi;
762 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
763 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
764 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
765 dev->data->dev_conf.link_duplex,
771 ret = i40e_vsi_init(vsi);
772 if (ret != I40E_SUCCESS) {
773 PMD_DRV_LOG(ERR, "Failed to init VSI");
777 /* Map queues with MSIX interrupt */
778 i40e_vsi_queues_bind_intr(vsi);
779 i40e_vsi_enable_queues_intr(vsi);
781 /* Enable all queues which have been configured */
782 ret = i40e_vsi_switch_queues(vsi, TRUE);
783 if (ret != I40E_SUCCESS) {
784 PMD_DRV_LOG(ERR, "Failed to enable VSI");
788 /* Enable receiving broadcast packets */
789 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
790 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
791 if (ret != I40E_SUCCESS)
792 PMD_DRV_LOG(INFO, "failed to set VSI broadcast");
795 /* Apply link configure */
796 ret = i40e_apply_link_speed(dev);
797 if (I40E_SUCCESS != ret) {
798 PMD_DRV_LOG(ERR, "Failed to apply link setting");
805 i40e_vsi_switch_queues(vsi, FALSE);
811 i40e_dev_stop(struct rte_eth_dev *dev)
813 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
814 struct i40e_vsi *vsi = pf->main_vsi;
816 /* Disable all queues */
817 i40e_vsi_switch_queues(vsi, FALSE);
820 i40e_dev_set_link_down(dev);
822 /* un-map queues with interrupt registers */
823 i40e_vsi_disable_queues_intr(vsi);
824 i40e_vsi_queues_unbind_intr(vsi);
828 i40e_dev_close(struct rte_eth_dev *dev)
830 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
831 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
834 PMD_INIT_FUNC_TRACE();
838 /* Disable interrupt */
839 i40e_pf_disable_irq0(hw);
840 rte_intr_disable(&(dev->pci_dev->intr_handle));
842 /* shutdown and destroy the HMC */
843 i40e_shutdown_lan_hmc(hw);
845 /* release all the existing VSIs and VEBs */
846 i40e_vsi_release(pf->main_vsi);
848 /* shutdown the adminq */
849 i40e_aq_queue_shutdown(hw, true);
850 i40e_shutdown_adminq(hw);
852 i40e_res_pool_destroy(&pf->qp_pool);
853 i40e_res_pool_destroy(&pf->msix_pool);
855 /* force a PF reset to clean anything leftover */
856 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
857 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
858 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
859 I40E_WRITE_FLUSH(hw);
863 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
865 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
866 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
867 struct i40e_vsi *vsi = pf->main_vsi;
870 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
872 if (status != I40E_SUCCESS)
873 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
875 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
877 if (status != I40E_SUCCESS)
878 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
883 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
885 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
886 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
887 struct i40e_vsi *vsi = pf->main_vsi;
890 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
892 if (status != I40E_SUCCESS)
893 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
895 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
897 if (status != I40E_SUCCESS)
898 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
902 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
904 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
905 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
906 struct i40e_vsi *vsi = pf->main_vsi;
909 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
910 if (ret != I40E_SUCCESS)
911 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
915 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
917 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
918 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
919 struct i40e_vsi *vsi = pf->main_vsi;
922 if (dev->data->promiscuous == 1)
923 return; /* must remain in all_multicast mode */
925 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
926 vsi->seid, FALSE, NULL);
927 if (ret != I40E_SUCCESS)
928 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
932 * Set device link up.
935 i40e_dev_set_link_up(struct rte_eth_dev *dev)
937 /* re-apply link speed setting */
938 return i40e_apply_link_speed(dev);
942 * Set device link down.
945 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
947 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
948 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
949 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
951 return i40e_phy_conf_link(hw, abilities, speed);
955 i40e_dev_link_update(struct rte_eth_dev *dev,
956 __rte_unused int wait_to_complete)
958 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
959 struct i40e_link_status link_status;
960 struct rte_eth_link link, old;
963 memset(&link, 0, sizeof(link));
964 memset(&old, 0, sizeof(old));
965 memset(&link_status, 0, sizeof(link_status));
966 rte_i40e_dev_atomic_read_link_status(dev, &old);
968 /* Get link status information from hardware */
969 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
970 if (status != I40E_SUCCESS) {
971 link.link_speed = ETH_LINK_SPEED_100;
972 link.link_duplex = ETH_LINK_FULL_DUPLEX;
973 PMD_DRV_LOG(ERR, "Failed to get link info");
977 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
979 if (!link.link_status)
982 /* i40e uses full duplex only */
983 link.link_duplex = ETH_LINK_FULL_DUPLEX;
985 /* Parse the link status */
986 switch (link_status.link_speed) {
987 case I40E_LINK_SPEED_100MB:
988 link.link_speed = ETH_LINK_SPEED_100;
990 case I40E_LINK_SPEED_1GB:
991 link.link_speed = ETH_LINK_SPEED_1000;
993 case I40E_LINK_SPEED_10GB:
994 link.link_speed = ETH_LINK_SPEED_10G;
996 case I40E_LINK_SPEED_20GB:
997 link.link_speed = ETH_LINK_SPEED_20G;
999 case I40E_LINK_SPEED_40GB:
1000 link.link_speed = ETH_LINK_SPEED_40G;
1003 link.link_speed = ETH_LINK_SPEED_100;
1008 rte_i40e_dev_atomic_write_link_status(dev, &link);
1009 if (link.link_status == old.link_status)
1015 /* Get all the statistics of a VSI */
1017 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1019 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1020 struct i40e_eth_stats *nes = &vsi->eth_stats;
1021 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1022 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
1024 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1025 vsi->offset_loaded, &oes->rx_bytes,
1027 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1028 vsi->offset_loaded, &oes->rx_unicast,
1030 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1031 vsi->offset_loaded, &oes->rx_multicast,
1032 &nes->rx_multicast);
1033 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1034 vsi->offset_loaded, &oes->rx_broadcast,
1035 &nes->rx_broadcast);
1036 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1037 &oes->rx_discards, &nes->rx_discards);
1038 /* GLV_REPC not supported */
1039 /* GLV_RMPC not supported */
1040 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1041 &oes->rx_unknown_protocol,
1042 &nes->rx_unknown_protocol);
1043 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1044 vsi->offset_loaded, &oes->tx_bytes,
1046 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1047 vsi->offset_loaded, &oes->tx_unicast,
1049 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1050 vsi->offset_loaded, &oes->tx_multicast,
1051 &nes->tx_multicast);
1052 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1053 vsi->offset_loaded, &oes->tx_broadcast,
1054 &nes->tx_broadcast);
1055 /* GLV_TDPC not supported */
1056 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1057 &oes->tx_errors, &nes->tx_errors);
1058 vsi->offset_loaded = true;
1060 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1062 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
1063 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
1064 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
1065 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
1066 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
1067 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1068 nes->rx_unknown_protocol);
1069 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
1070 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
1071 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
1072 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
1073 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
1074 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
1075 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1079 /* Get all statistics of a port */
1081 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1084 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1085 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1086 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1087 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1089 /* Get statistics of struct i40e_eth_stats */
1090 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1091 I40E_GLPRT_GORCL(hw->port),
1092 pf->offset_loaded, &os->eth.rx_bytes,
1094 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1095 I40E_GLPRT_UPRCL(hw->port),
1096 pf->offset_loaded, &os->eth.rx_unicast,
1097 &ns->eth.rx_unicast);
1098 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1099 I40E_GLPRT_MPRCL(hw->port),
1100 pf->offset_loaded, &os->eth.rx_multicast,
1101 &ns->eth.rx_multicast);
1102 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1103 I40E_GLPRT_BPRCL(hw->port),
1104 pf->offset_loaded, &os->eth.rx_broadcast,
1105 &ns->eth.rx_broadcast);
1106 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1107 pf->offset_loaded, &os->eth.rx_discards,
1108 &ns->eth.rx_discards);
1109 /* GLPRT_REPC not supported */
1110 /* GLPRT_RMPC not supported */
1111 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1113 &os->eth.rx_unknown_protocol,
1114 &ns->eth.rx_unknown_protocol);
1115 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1116 I40E_GLPRT_GOTCL(hw->port),
1117 pf->offset_loaded, &os->eth.tx_bytes,
1119 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1120 I40E_GLPRT_UPTCL(hw->port),
1121 pf->offset_loaded, &os->eth.tx_unicast,
1122 &ns->eth.tx_unicast);
1123 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1124 I40E_GLPRT_MPTCL(hw->port),
1125 pf->offset_loaded, &os->eth.tx_multicast,
1126 &ns->eth.tx_multicast);
1127 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1128 I40E_GLPRT_BPTCL(hw->port),
1129 pf->offset_loaded, &os->eth.tx_broadcast,
1130 &ns->eth.tx_broadcast);
1131 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1132 pf->offset_loaded, &os->eth.tx_discards,
1133 &ns->eth.tx_discards);
1134 /* GLPRT_TEPC not supported */
1136 /* additional port specific stats */
1137 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1138 pf->offset_loaded, &os->tx_dropped_link_down,
1139 &ns->tx_dropped_link_down);
1140 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1141 pf->offset_loaded, &os->crc_errors,
1143 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1144 pf->offset_loaded, &os->illegal_bytes,
1145 &ns->illegal_bytes);
1146 /* GLPRT_ERRBC not supported */
1147 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1148 pf->offset_loaded, &os->mac_local_faults,
1149 &ns->mac_local_faults);
1150 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1151 pf->offset_loaded, &os->mac_remote_faults,
1152 &ns->mac_remote_faults);
1153 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1154 pf->offset_loaded, &os->rx_length_errors,
1155 &ns->rx_length_errors);
1156 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1157 pf->offset_loaded, &os->link_xon_rx,
1159 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1160 pf->offset_loaded, &os->link_xoff_rx,
1162 for (i = 0; i < 8; i++) {
1163 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1165 &os->priority_xon_rx[i],
1166 &ns->priority_xon_rx[i]);
1167 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1169 &os->priority_xoff_rx[i],
1170 &ns->priority_xoff_rx[i]);
1172 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1173 pf->offset_loaded, &os->link_xon_tx,
1175 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1176 pf->offset_loaded, &os->link_xoff_tx,
1178 for (i = 0; i < 8; i++) {
1179 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1181 &os->priority_xon_tx[i],
1182 &ns->priority_xon_tx[i]);
1183 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1185 &os->priority_xoff_tx[i],
1186 &ns->priority_xoff_tx[i]);
1187 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1189 &os->priority_xon_2_xoff[i],
1190 &ns->priority_xon_2_xoff[i]);
1192 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1193 I40E_GLPRT_PRC64L(hw->port),
1194 pf->offset_loaded, &os->rx_size_64,
1196 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1197 I40E_GLPRT_PRC127L(hw->port),
1198 pf->offset_loaded, &os->rx_size_127,
1200 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1201 I40E_GLPRT_PRC255L(hw->port),
1202 pf->offset_loaded, &os->rx_size_255,
1204 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1205 I40E_GLPRT_PRC511L(hw->port),
1206 pf->offset_loaded, &os->rx_size_511,
1208 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1209 I40E_GLPRT_PRC1023L(hw->port),
1210 pf->offset_loaded, &os->rx_size_1023,
1212 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1213 I40E_GLPRT_PRC1522L(hw->port),
1214 pf->offset_loaded, &os->rx_size_1522,
1216 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1217 I40E_GLPRT_PRC9522L(hw->port),
1218 pf->offset_loaded, &os->rx_size_big,
1220 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1221 pf->offset_loaded, &os->rx_undersize,
1223 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1224 pf->offset_loaded, &os->rx_fragments,
1226 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1227 pf->offset_loaded, &os->rx_oversize,
1229 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1230 pf->offset_loaded, &os->rx_jabber,
1232 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1233 I40E_GLPRT_PTC64L(hw->port),
1234 pf->offset_loaded, &os->tx_size_64,
1236 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1237 I40E_GLPRT_PTC127L(hw->port),
1238 pf->offset_loaded, &os->tx_size_127,
1240 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1241 I40E_GLPRT_PTC255L(hw->port),
1242 pf->offset_loaded, &os->tx_size_255,
1244 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1245 I40E_GLPRT_PTC511L(hw->port),
1246 pf->offset_loaded, &os->tx_size_511,
1248 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1249 I40E_GLPRT_PTC1023L(hw->port),
1250 pf->offset_loaded, &os->tx_size_1023,
1252 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1253 I40E_GLPRT_PTC1522L(hw->port),
1254 pf->offset_loaded, &os->tx_size_1522,
1256 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1257 I40E_GLPRT_PTC9522L(hw->port),
1258 pf->offset_loaded, &os->tx_size_big,
1260 /* GLPRT_MSPDC not supported */
1261 /* GLPRT_XEC not supported */
1263 pf->offset_loaded = true;
1266 i40e_update_vsi_stats(pf->main_vsi);
1268 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1269 ns->eth.rx_broadcast;
1270 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1271 ns->eth.tx_broadcast;
1272 stats->ibytes = ns->eth.rx_bytes;
1273 stats->obytes = ns->eth.tx_bytes;
1274 stats->oerrors = ns->eth.tx_errors;
1275 stats->imcasts = ns->eth.rx_multicast;
1278 stats->ibadcrc = ns->crc_errors;
1279 stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
1280 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1281 stats->imissed = ns->eth.rx_discards;
1282 stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
1284 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1285 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
1286 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
1287 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
1288 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
1289 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
1290 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1291 ns->eth.rx_unknown_protocol);
1292 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
1293 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
1294 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
1295 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
1296 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
1297 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
1299 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
1300 ns->tx_dropped_link_down);
1301 PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
1302 PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
1304 PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
1305 PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
1306 ns->mac_local_faults);
1307 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
1308 ns->mac_remote_faults);
1309 PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
1310 ns->rx_length_errors);
1311 PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
1312 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
1313 for (i = 0; i < 8; i++) {
1314 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
1315 i, ns->priority_xon_rx[i]);
1316 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
1317 i, ns->priority_xoff_rx[i]);
1319 PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
1320 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
1321 for (i = 0; i < 8; i++) {
1322 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
1323 i, ns->priority_xon_tx[i]);
1324 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
1325 i, ns->priority_xoff_tx[i]);
1326 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
1327 i, ns->priority_xon_2_xoff[i]);
1329 PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
1330 PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
1331 PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
1332 PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
1333 PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
1334 PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
1335 PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
1336 PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
1337 PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
1338 PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
1339 PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
1340 PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
1341 PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
1342 PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
1343 PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
1344 PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
1345 PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
1346 PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
1347 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
1348 ns->mac_short_packet_dropped);
1349 PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
1350 ns->checksum_error);
1351 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1354 /* Reset the statistics */
1356 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1358 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1360 /* Clearing this flag reloads the start point of each counter */
1361 pf->offset_loaded = false;
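/*
 * Rather than writing the hardware counters, i40e_dev_stats_get() keeps a
 * baseline snapshot in pf->stats_offset and reports deltas against it, so
 * dropping offset_loaded simply makes the next read re-baseline every
 * counter to zero.
 */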
1365 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1366 __rte_unused uint16_t queue_id,
1367 __rte_unused uint8_t stat_idx,
1368 __rte_unused uint8_t is_rx)
1370 PMD_INIT_FUNC_TRACE();
1376 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1378 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1379 struct i40e_vsi *vsi = pf->main_vsi;
1381 dev_info->max_rx_queues = vsi->nb_qps;
1382 dev_info->max_tx_queues = vsi->nb_qps;
1383 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1384 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1385 dev_info->max_mac_addrs = vsi->max_macaddrs;
1386 dev_info->max_vfs = dev->pci_dev->max_vfs;
1387 dev_info->rx_offload_capa =
1388 DEV_RX_OFFLOAD_VLAN_STRIP |
1389 DEV_RX_OFFLOAD_IPV4_CKSUM |
1390 DEV_RX_OFFLOAD_UDP_CKSUM |
1391 DEV_RX_OFFLOAD_TCP_CKSUM;
1392 dev_info->tx_offload_capa =
1393 DEV_TX_OFFLOAD_VLAN_INSERT |
1394 DEV_TX_OFFLOAD_IPV4_CKSUM |
1395 DEV_TX_OFFLOAD_UDP_CKSUM |
1396 DEV_TX_OFFLOAD_TCP_CKSUM |
1397 DEV_TX_OFFLOAD_SCTP_CKSUM;
1399 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1401 .pthresh = I40E_DEFAULT_RX_PTHRESH,
1402 .hthresh = I40E_DEFAULT_RX_HTHRESH,
1403 .wthresh = I40E_DEFAULT_RX_WTHRESH,
1405 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1409 dev_info->default_txconf = (struct rte_eth_txconf) {
1411 .pthresh = I40E_DEFAULT_TX_PTHRESH,
1412 .hthresh = I40E_DEFAULT_TX_HTHRESH,
1413 .wthresh = I40E_DEFAULT_TX_WTHRESH,
1415 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1416 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1417 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
1423 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1425 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1426 struct i40e_vsi *vsi = pf->main_vsi;
1427 PMD_INIT_FUNC_TRACE();
1430 return i40e_vsi_add_vlan(vsi, vlan_id);
1432 return i40e_vsi_delete_vlan(vsi, vlan_id);
1436 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1437 __rte_unused uint16_t tpid)
1439 PMD_INIT_FUNC_TRACE();
1443 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1445 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1446 struct i40e_vsi *vsi = pf->main_vsi;
1448 if (mask & ETH_VLAN_STRIP_MASK) {
1449 /* Enable or disable VLAN stripping */
1450 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1451 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1453 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1456 if (mask & ETH_VLAN_EXTEND_MASK) {
1457 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1458 i40e_vsi_config_double_vlan(vsi, TRUE);
1460 i40e_vsi_config_double_vlan(vsi, FALSE);
1465 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1466 __rte_unused uint16_t queue,
1467 __rte_unused int on)
1469 PMD_INIT_FUNC_TRACE();
1473 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1475 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1476 struct i40e_vsi *vsi = pf->main_vsi;
1477 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1478 struct i40e_vsi_vlan_pvid_info info;
1480 memset(&info, 0, sizeof(info));
1483 info.config.pvid = pvid;
1485 info.config.reject.tagged =
1486 data->dev_conf.txmode.hw_vlan_reject_tagged;
1487 info.config.reject.untagged =
1488 data->dev_conf.txmode.hw_vlan_reject_untagged;
1491 return i40e_vsi_vlan_pvid_set(vsi, &info);
1495 i40e_dev_led_on(struct rte_eth_dev *dev)
1497 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1498 uint32_t mode = i40e_led_get(hw);
1501 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1507 i40e_dev_led_off(struct rte_eth_dev *dev)
1509 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1510 uint32_t mode = i40e_led_get(hw);
1513 i40e_led_set(hw, 0, false);
1519 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1520 __rte_unused struct rte_eth_fc_conf *fc_conf)
1522 PMD_INIT_FUNC_TRACE();
1528 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1529 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1531 PMD_INIT_FUNC_TRACE();
1536 /* Add a MAC address, and update filters */
1538 i40e_macaddr_add(struct rte_eth_dev *dev,
1539 struct ether_addr *mac_addr,
1540 __attribute__((unused)) uint32_t index,
1541 __attribute__((unused)) uint32_t pool)
1543 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1544 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545 struct i40e_mac_filter_info mac_filter;
1546 struct i40e_vsi *vsi = pf->main_vsi;
1547 struct ether_addr old_mac;
1550 if (!is_valid_assigned_ether_addr(mac_addr)) {
1551 PMD_DRV_LOG(ERR, "Invalid ethernet address");
1555 if (is_same_ether_addr(mac_addr, &(pf->dev_addr))) {
1556 PMD_DRV_LOG(INFO, "Ignore adding permanent mac address");
1560 /* Write mac address */
1561 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1562 mac_addr->addr_bytes, NULL);
1563 if (ret != I40E_SUCCESS) {
1564 PMD_DRV_LOG(ERR, "Failed to write mac address");
1568 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1569 (void)rte_memcpy(hw->mac.addr, mac_addr->addr_bytes,
1571 (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1572 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1574 ret = i40e_vsi_add_mac(vsi, &mac_filter);
1575 if (ret != I40E_SUCCESS) {
1576 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1580 ether_addr_copy(mac_addr, &pf->dev_addr);
1581 i40e_vsi_delete_mac(vsi, &old_mac);
1584 /* Remove a MAC address, and update filters */
1586 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1588 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1589 struct i40e_vsi *vsi = pf->main_vsi;
1590 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1591 struct ether_addr *macaddr;
1593 struct i40e_hw *hw =
1594 I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1596 if (index >= vsi->max_macaddrs)
1599 macaddr = &(data->mac_addrs[index]);
1600 if (!is_valid_assigned_ether_addr(macaddr))
1603 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY,
1604 hw->mac.perm_addr, NULL);
1605 if (ret != I40E_SUCCESS) {
1606 PMD_DRV_LOG(ERR, "Failed to write mac address");
1610 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
1612 ret = i40e_vsi_delete_mac(vsi, macaddr);
1613 if (ret != I40E_SUCCESS)
1616 /* Clear device address as it has been removed */
1617 if (is_same_ether_addr(&(pf->dev_addr), macaddr))
1618 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1621 /* Set perfect match or hash match of MAC and VLAN for a VF */
1623 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1624 struct rte_eth_mac_filter *filter,
1628 struct i40e_mac_filter_info mac_filter;
1629 struct ether_addr old_mac;
1630 struct ether_addr *new_mac;
1631 struct i40e_pf_vf *vf = NULL;
1636 PMD_DRV_LOG(ERR, "Invalid PF argument\n");
1639 hw = I40E_PF_TO_HW(pf);
1641 if (filter == NULL) {
1642 PMD_DRV_LOG(ERR, "Invalid mac filter argument\n");
1646 new_mac = &filter->mac_addr;
1648 if (is_zero_ether_addr(new_mac)) {
1649 PMD_DRV_LOG(ERR, "Invalid ethernet address\n");
1653 vf_id = filter->dst_id;
1655 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
1656 PMD_DRV_LOG(ERR, "Invalid argument\n");
1659 vf = &pf->vfs[vf_id];
1661 if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1662 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address\n");
1667 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1668 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1670 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1673 mac_filter.filter_type = filter->filter_type;
1674 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1675 if (ret != I40E_SUCCESS) {
1676 PMD_DRV_LOG(ERR, "Failed to add MAC filter\n");
1679 ether_addr_copy(new_mac, &pf->dev_addr);
1681 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1683 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1684 if (ret != I40E_SUCCESS) {
1685 PMD_DRV_LOG(ERR, "Failed to delete MAC filter\n");
1689 /* Clear device address as it has been removed */
1690 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1691 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1697 /* MAC filter handle */
1699 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1702 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1703 struct rte_eth_mac_filter *filter;
1704 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1705 int ret = I40E_NOT_SUPPORTED;
1707 filter = (struct rte_eth_mac_filter *)(arg);
1709 switch (filter_op) {
1710 case RTE_ETH_FILTER_NONE:
1713 case RTE_ETH_FILTER_ADD:
1714 i40e_pf_disable_irq0(hw);
1716 ret = i40e_vf_mac_filter_set(pf, filter, 1);
1717 i40e_pf_enable_irq0(hw);
1719 case RTE_ETH_FILTER_DELETE:
1720 i40e_pf_disable_irq0(hw);
1722 ret = i40e_vf_mac_filter_set(pf, filter, 0);
1723 i40e_pf_enable_irq0(hw);
1726 PMD_DRV_LOG(ERR, "unknown operation %u\n", filter_op);
1727 ret = I40E_ERR_PARAM;
1735 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1736 struct rte_eth_rss_reta *reta_conf)
1738 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1740 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1742 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1744 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1746 mask = (uint8_t)((reta_conf->mask_hi >>
1755 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1757 for (j = 0, lut = 0; j < 4; j++) {
1758 if (mask & (0x1 << j))
1759 lut |= reta_conf->reta[i + j] << (8 * j);
1761 lut |= l & (0xFF << (8 * j));
1763 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1770 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1771 struct rte_eth_rss_reta *reta_conf)
1773 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1775 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1777 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1779 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1781 mask = (uint8_t)((reta_conf->mask_hi >>
1787 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1788 for (j = 0; j < 4; j++) {
1789 if (mask & (0x1 << j))
1790 reta_conf->reta[i + j] =
1791 (uint8_t)((lut >> (8 * j)) & 0xFF);
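/*
 * Both RETA helpers above depend on the PFQF_HLUT layout: each 32-bit HLUT
 * register packs four one-byte queue indexes, so RETA entry i lives in
 * register HLUT(i >> 2) at byte (i & 3). That is why the loops walk the
 * table four entries at a time with a 4-bit mask per group.
 */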
1799 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1800 * @hw: pointer to the HW structure
1801 * @mem: pointer to mem struct to fill out
1802 * @size: size of memory requested
1803 * @alignment: what to align the allocation to
1805 enum i40e_status_code
1806 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1807 struct i40e_dma_mem *mem,
1811 static uint64_t id = 0;
1812 const struct rte_memzone *mz = NULL;
1813 char z_name[RTE_MEMZONE_NAMESIZE];
1816 return I40E_ERR_PARAM;
1819 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1820 #ifdef RTE_LIBRTE_XEN_DOM0
1821 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1824 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1827 return I40E_ERR_NO_MEMORY;
1832 #ifdef RTE_LIBRTE_XEN_DOM0
1833 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1835 mem->pa = mz->phys_addr;
1838 return I40E_SUCCESS;
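/*
 * Each DMA allocation above is backed by its own rte_memzone named
 * "i40e_dma_<id>"; the physical address handed back to the caller comes
 * straight from the memzone (translated with rte_mem_phy2mch() when built
 * for Xen DOM0). The shared code (e.g. admin queue and HMC setup) consumes
 * these buffers.
 */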
1842 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1843 * @hw: pointer to the HW structure
1844 * @mem: pointer to mem struct to free
1846 enum i40e_status_code
1847 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1848 struct i40e_dma_mem *mem)
1850 if (!mem || !mem->va)
1851 return I40E_ERR_PARAM;
1856 return I40E_SUCCESS;
1860 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1861 * @hw: pointer to the HW structure
1862 * @mem: pointer to mem struct to fill out
1863 * @size: size of memory requested
1865 enum i40e_status_code
1866 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1867 struct i40e_virt_mem *mem,
1871 return I40E_ERR_PARAM;
1874 mem->va = rte_zmalloc("i40e", size, 0);
1877 return I40E_SUCCESS;
1879 return I40E_ERR_NO_MEMORY;
1883 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1884 * @hw: pointer to the HW structure
1885 * @mem: pointer to mem struct to free
1887 enum i40e_status_code
1888 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1889 struct i40e_virt_mem *mem)
1892 return I40E_ERR_PARAM;
1897 return I40E_SUCCESS;
1901 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1903 rte_spinlock_init(&sp->spinlock);
1907 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1909 rte_spinlock_lock(&sp->spinlock);
1913 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1915 rte_spinlock_unlock(&sp->spinlock);
1919 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1925 * Get the hardware capabilities, which will be parsed
1926 * and saved into struct i40e_hw.
1929 i40e_get_cap(struct i40e_hw *hw)
1931 struct i40e_aqc_list_capabilities_element_resp *buf;
1932 uint16_t len, size = 0;
1935 /* Calculate a buffer large enough to hold the response data temporarily */
1936 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1937 I40E_MAX_CAP_ELE_NUM;
1938 buf = rte_zmalloc("i40e", len, 0);
1940 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1941 return I40E_ERR_NO_MEMORY;
1944 /* Get and parse the capabilities, then save them to hw */
1945 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1946 i40e_aqc_opc_list_func_capabilities, NULL);
1947 if (ret != I40E_SUCCESS)
1948 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1950 /* Free the temporary buffer after being used */
1957 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1959 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1960 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1961 uint16_t sum_queues = 0, sum_vsis;
1963 /* First check whether the FW supports SRIOV */
1964 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1965 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
1969 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
1970 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
1971 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
1972 /* Allocate queues for pf */
1973 if (hw->func_caps.rss) {
1974 pf->flags |= I40E_FLAG_RSS;
1975 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
1976 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
1977 pf->lan_nb_qps = i40e_prev_power_of_2(pf->lan_nb_qps);
1980 sum_queues = pf->lan_nb_qps;
1981 /* Default VSI is not counted in */
1983 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
1985 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
1986 pf->flags |= I40E_FLAG_SRIOV;
1987 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
1988 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
1989 PMD_INIT_LOG(ERR, "Config VF number %u, "
1990 "max supported %u.",
1991 dev->pci_dev->max_vfs,
1992 hw->func_caps.num_vfs);
1995 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
1996 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
1997 "max support %u queues.",
1998 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2001 pf->vf_num = dev->pci_dev->max_vfs;
2002 sum_queues += pf->vf_nb_qps * pf->vf_num;
2003 sum_vsis += pf->vf_num;
2004 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2005 pf->vf_num, pf->vf_nb_qps);
2009 if (hw->func_caps.vmdq) {
2010 pf->flags |= I40E_FLAG_VMDQ;
2011 pf->vmdq_nb_qps = I40E_DEFAULT_QP_NUM_VMDQ;
2012 sum_queues += pf->vmdq_nb_qps;
2014 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2017 if (hw->func_caps.fd) {
2018 pf->flags |= I40E_FLAG_FDIR;
2019 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2021 * Each flow director consumes one VSI and one queue, but the
2022 * exact usage cannot be calculated predictably here.
2026 if (sum_vsis > pf->max_num_vsi ||
2027 sum_queues > hw->func_caps.num_rx_qp) {
2028 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2029 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2030 pf->max_num_vsi, sum_vsis);
2031 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2032 hw->func_caps.num_rx_qp, sum_queues);
2036 /* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for misc intr
2038 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2039 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2040 sum_vsis, hw->func_caps.num_msix_vectors);
2043 return I40E_SUCCESS;
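/*
 * Illustrative sketch (not part of the driver): the budgeting above boils
 * down to three checks against the reported function capabilities. The
 * helper below mirrors that arithmetic with plain integers; the name and
 * parameters are hypothetical, only the relationships come from the code
 * above.
 */
static inline int
i40e_budget_fits_sketch(uint16_t sum_vsis, uint16_t sum_queues,
			uint16_t max_vsis, uint16_t num_rx_qp,
			uint16_t num_msix_vectors)
{
	if (sum_vsis > max_vsis)		/* VSI budget exceeded */
		return 0;
	if (sum_queues > num_rx_qp)		/* queue-pair budget exceeded */
		return 0;
	/* one MSIX vector is reserved for IRQ0/misc, the rest serve the VSIs */
	if (sum_vsis > (uint16_t)(num_msix_vectors - 1))
		return 0;
	return 1;				/* configuration can be satisfied */
}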
2047 i40e_pf_get_switch_config(struct i40e_pf *pf)
2049 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2050 struct i40e_aqc_get_switch_config_resp *switch_config;
2051 struct i40e_aqc_switch_config_element_resp *element;
2052 uint16_t start_seid = 0, num_reported;
2055 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2056 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2057 if (!switch_config) {
2058 PMD_DRV_LOG(ERR, "Failed to allocated memory");
2062 /* Get the switch configurations */
2063 ret = i40e_aq_get_switch_config(hw, switch_config,
2064 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2065 if (ret != I40E_SUCCESS) {
2066 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2069 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2070 if (num_reported != 1) { /* The number should be 1 */
2071 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2075 /* Parse the switch configuration elements */
2076 element = &(switch_config->element[0]);
2077 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2078 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2079 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2081 PMD_DRV_LOG(INFO, "Unknown element type");
2084 rte_free(switch_config);
2090 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2093 struct pool_entry *entry;
2095 if (pool == NULL || num == 0)
2098 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2099 if (entry == NULL) {
2100 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2104 /* queue heap initialize */
2105 pool->num_free = num;
2106 pool->num_alloc = 0;
2108 LIST_INIT(&pool->alloc_list);
2109 LIST_INIT(&pool->free_list);
2111 /* Initialize element */
2115 LIST_INSERT_HEAD(&pool->free_list, entry, next);
2120 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2122 struct pool_entry *entry;
2127 LIST_FOREACH(entry, &pool->alloc_list, next) {
2128 LIST_REMOVE(entry, next);
2132 LIST_FOREACH(entry, &pool->free_list, next) {
2133 LIST_REMOVE(entry, next);
2138 pool->num_alloc = 0;
2140 LIST_INIT(&pool->alloc_list);
2141 LIST_INIT(&pool->free_list);
2145 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2148 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2149 uint32_t pool_offset;
2153 PMD_DRV_LOG(ERR, "Invalid parameter");
2157 pool_offset = base - pool->base;
2158 /* Lookup in alloc list */
2159 LIST_FOREACH(entry, &pool->alloc_list, next) {
2160 if (entry->base == pool_offset) {
2161 valid_entry = entry;
2162 LIST_REMOVE(entry, next);
2167 /* Not found, return */
2168 if (valid_entry == NULL) {
2169 PMD_DRV_LOG(ERR, "Failed to find entry");
2174 * Found it; move it to the free list and try to merge.
2175 * To make merging easier, the free list is always kept sorted by qbase.
2176 * Find the adjacent prev and next entries.
2179 LIST_FOREACH(entry, &pool->free_list, next) {
2180 if (entry->base > valid_entry->base) {
2188 /* Try to merge with the next one */
2190 /* Merge with next one */
2191 if (valid_entry->base + valid_entry->len == next->base) {
2192 next->base = valid_entry->base;
2193 next->len += valid_entry->len;
2194 rte_free(valid_entry);
2201 /* Merge with previous one */
2202 if (prev->base + prev->len == valid_entry->base) {
2203 prev->len += valid_entry->len;
2204 /* If it was already merged with the next one, remove that next node */
2206 LIST_REMOVE(valid_entry, next);
2207 rte_free(valid_entry);
2209 rte_free(valid_entry);
2215 /* No adjacent entry found to merge with, just insert */
2218 LIST_INSERT_AFTER(prev, valid_entry, next);
2219 else if (next != NULL)
2220 LIST_INSERT_BEFORE(next, valid_entry, next);
2221 else /* The list is empty, insert at the head */
2222 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2225 pool->num_free += valid_entry->len;
2226 pool->num_alloc -= valid_entry->len;
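/*
 * Illustrative sketch (not part of the driver): freeing a range back into
 * the sorted free list has three outcomes - merge into the next entry,
 * merge into the previous entry, or plain insertion. The struct and helper
 * below are hypothetical stand-ins using the same base/len fields as
 * struct pool_entry; the driver above additionally collapses prev, freed
 * and next into a single node when all three are adjacent.
 */
struct i40e_range_sketch { uint32_t base; uint32_t len; };

static inline void
i40e_merge_sketch(struct i40e_range_sketch *prev,
		  struct i40e_range_sketch *freed,
		  struct i40e_range_sketch *next)
{
	if (next != NULL && freed->base + freed->len == next->base) {
		/* e.g. freed=[8,4) and next=[12,4) -> next becomes [8,8) */
		next->base = freed->base;
		next->len += freed->len;
	} else if (prev != NULL && prev->base + prev->len == freed->base) {
		/* e.g. prev=[0,8) and freed=[8,4) -> prev becomes [0,12) */
		prev->len += freed->len;
	}
	/* otherwise the freed range is simply inserted between prev and next */
}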
2232 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2235 struct pool_entry *entry, *valid_entry;
2237 if (pool == NULL || num == 0) {
2238 PMD_DRV_LOG(ERR, "Invalid parameter");
2242 if (pool->num_free < num) {
2243 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2244 num, pool->num_free);
2249 /* Look up the free list and find the best-fit entry */
2250 LIST_FOREACH(entry, &pool->free_list, next) {
2251 if (entry->len >= num) {
2253 if (entry->len == num) {
2254 valid_entry = entry;
2257 if (valid_entry == NULL || valid_entry->len > entry->len)
2258 valid_entry = entry;
2262 /* No entry found to satisfy the request, return */
2263 if (valid_entry == NULL) {
2264 PMD_DRV_LOG(ERR, "No valid entry found");
2268 * The entry has exactly the requested number of queues,
2269 * remove it from the free list.
2271 if (valid_entry->len == num) {
2272 LIST_REMOVE(valid_entry, next);
2275 * The entry has more queues than requested; create a new
2276 * entry for the alloc_list and reduce its queue base and
2277 * count in the free_list.
2279 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2280 if (entry == NULL) {
2281 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2285 entry->base = valid_entry->base;
2287 valid_entry->base += num;
2288 valid_entry->len -= num;
2289 valid_entry = entry;
2292 /* Insert it into alloc list, not sorted */
2293 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2295 pool->num_free -= valid_entry->len;
2296 pool->num_alloc += valid_entry->len;
2298 return (valid_entry->base + pool->base);
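/*
 * Illustrative sketch (not part of the driver): the best-fit allocation
 * above either consumes a free entry exactly or splits it. With a free
 * entry [base=16, len=8) and a request for num=3 queues, the split leaves
 * [19, 5) on the free list and grants the absolute base pool->base + 16.
 * The helper name is hypothetical; the arithmetic matches the code above.
 */
static inline uint32_t
i40e_split_sketch(uint32_t *entry_base, uint32_t *entry_len,
		  uint32_t num, uint32_t pool_base)
{
	uint32_t granted = *entry_base;	/* start of the granted range */

	*entry_base += num;		/* shrink the remaining free entry ... */
	*entry_len -= num;		/* ... by the granted length           */
	return granted + pool_base;	/* absolute base, as returned above    */
}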
2302 * bitmap_is_subset - Check whether src2 is a subset of src1
2305 bitmap_is_subset(uint8_t src1, uint8_t src2)
2307 return !((src1 ^ src2) & src2);
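/*
 * Illustrative sketch (not part of the driver): src2 is a subset of src1
 * exactly when no bit of src2 falls outside src1, i.e. (src1 ^ src2) & src2
 * is zero. A worked example with TC maps, assuming the HW supports TC0..TC2:
 */
static inline int
i40e_bitmap_subset_example(void)
{
	uint8_t enabled = 0x7;	/* TC0..TC2 supported by HW                 */
	uint8_t ok      = 0x5;	/* TC0 and TC2 requested -> subset          */
	uint8_t bad     = 0x9;	/* TC3 requested but not supported -> not a subset */

	/* (0x7^0x5)&0x5 == 0 -> subset; (0x7^0x9)&0x9 == 0x8 -> not a subset */
	return bitmap_is_subset(enabled, ok) && !bitmap_is_subset(enabled, bad);
}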
2311 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2313 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2315 /* If DCB is not supported, only default TC is supported */
2316 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2317 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2321 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2322 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2323 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2327 return I40E_SUCCESS;
2331 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2332 struct i40e_vsi_vlan_pvid_info *info)
2335 struct i40e_vsi_context ctxt;
2336 uint8_t vlan_flags = 0;
2339 if (vsi == NULL || info == NULL) {
2340 PMD_DRV_LOG(ERR, "invalid parameters");
2341 return I40E_ERR_PARAM;
2345 vsi->info.pvid = info->config.pvid;
2347 * If PVID insertion is enabled, only tagged packets are
2348 * allowed to be sent out.
2350 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2351 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2354 if (info->config.reject.tagged == 0)
2355 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2357 if (info->config.reject.untagged == 0)
2358 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2360 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2361 I40E_AQ_VSI_PVLAN_MODE_MASK);
2362 vsi->info.port_vlan_flags |= vlan_flags;
2363 vsi->info.valid_sections =
2364 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2365 memset(&ctxt, 0, sizeof(ctxt));
2366 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2367 ctxt.seid = vsi->seid;
2369 hw = I40E_VSI_TO_HW(vsi);
2370 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2371 if (ret != I40E_SUCCESS)
2372 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2378 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2380 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2382 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2384 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2385 if (ret != I40E_SUCCESS)
2389 PMD_DRV_LOG(ERR, "seid not valid");
2393 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2394 tc_bw_data.tc_valid_bits = enabled_tcmap;
2395 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2396 tc_bw_data.tc_bw_credits[i] =
2397 (enabled_tcmap & (1 << i)) ? 1 : 0;
2399 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2400 if (ret != I40E_SUCCESS) {
2401 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2405 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2406 sizeof(vsi->info.qs_handle));
2407 return I40E_SUCCESS;
2411 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2412 struct i40e_aqc_vsi_properties_data *info,
2413 uint8_t enabled_tcmap)
2415 int ret, total_tc = 0, i;
2416 uint16_t qpnum_per_tc, bsf, qp_idx;
2418 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2419 if (ret != I40E_SUCCESS)
2422 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2423 if (enabled_tcmap & (1 << i))
2425 vsi->enabled_tc = enabled_tcmap;
2427 /* Number of queues per enabled TC */
2428 qpnum_per_tc = i40e_prev_power_of_2(vsi->nb_qps / total_tc);
2429 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2430 bsf = rte_bsf32(qpnum_per_tc);
2432 /* Adjust the queue number to actual queues that can be applied */
2433 vsi->nb_qps = qpnum_per_tc * total_tc;
2436 * Configure TC and queue mapping parameters. For an enabled TC,
2437 * allocate qpnum_per_tc queues to that traffic class; a disabled
2438 * TC is served by the default queue.
2441 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2442 if (vsi->enabled_tc & (1 << i)) {
2443 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2444 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2445 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2446 qp_idx += qpnum_per_tc;
2448 info->tc_mapping[i] = 0;
2451 /* Associate queue number with VSI */
2452 if (vsi->type == I40E_VSI_SRIOV) {
2453 info->mapping_flags |=
2454 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2455 for (i = 0; i < vsi->nb_qps; i++)
2456 info->queue_mapping[i] =
2457 rte_cpu_to_le_16(vsi->base_queue + i);
2459 info->mapping_flags |=
2460 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2461 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2463 info->valid_sections =
2464 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2466 return I40E_SUCCESS;
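/*
 * Illustrative sketch (not part of the driver): the per-TC queue count is
 * rounded down to a power of two and bsf is its log2, as obtained from
 * rte_bsf32(). For example nb_qps = 10 with one enabled TC gives
 * qpnum_per_tc = 8 and bsf = 3, so the VSI ends up with 8 usable queues.
 * The helper below is a hypothetical stand-in for i40e_prev_power_of_2(),
 * whose body is not shown here.
 */
static inline uint16_t
i40e_prev_pow2_sketch(uint16_t n)
{
	uint16_t p = 1;

	while ((uint16_t)(p << 1) != 0 && (uint16_t)(p << 1) <= n)
		p <<= 1;		/* keep doubling while still <= n */
	return n ? p : 0;		/* 10 -> 8, 8 -> 8, 1 -> 1, 0 -> 0 */
}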
2470 i40e_veb_release(struct i40e_veb *veb)
2472 struct i40e_vsi *vsi;
2475 if (veb == NULL || veb->associate_vsi == NULL)
2478 if (!TAILQ_EMPTY(&veb->head)) {
2479 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2483 vsi = veb->associate_vsi;
2484 hw = I40E_VSI_TO_HW(vsi);
2486 vsi->uplink_seid = veb->uplink_seid;
2487 i40e_aq_delete_element(hw, veb->seid, NULL);
2490 return I40E_SUCCESS;
2494 static struct i40e_veb *
2495 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2497 struct i40e_veb *veb;
2501 if (NULL == pf || vsi == NULL) {
2502 PMD_DRV_LOG(ERR, "veb setup failed, "
2503 "associated VSI shouldn't null");
2506 hw = I40E_PF_TO_HW(pf);
2508 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2510 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2514 veb->associate_vsi = vsi;
2515 TAILQ_INIT(&veb->head);
2516 veb->uplink_seid = vsi->uplink_seid;
2518 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2519 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2521 if (ret != I40E_SUCCESS) {
2522 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2523 hw->aq.asq_last_status);
2527 /* get statistics index */
2528 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2529 &veb->stats_idx, NULL, NULL, NULL);
2530 if (ret != I40E_SUCCESS) {
2531 PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
2532 hw->aq.asq_last_status);
2536 /* Get VEB bandwidth, to be implemented */
2537 /* The associated VSI now binds to the VEB, so set its uplink to this VEB */
2538 vsi->uplink_seid = veb->seid;
2547 i40e_vsi_release(struct i40e_vsi *vsi)
2551 struct i40e_vsi_list *vsi_list;
2553 struct i40e_mac_filter *f;
2556 return I40E_SUCCESS;
2558 pf = I40E_VSI_TO_PF(vsi);
2559 hw = I40E_VSI_TO_HW(vsi);
2561 /* The VSI has children attached, release the children first */
2563 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2564 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2566 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2568 i40e_veb_release(vsi->veb);
2571 /* Remove all macvlan filters of the VSI */
2572 i40e_vsi_remove_all_macvlan_filter(vsi);
2573 TAILQ_FOREACH(f, &vsi->mac_list, next)
2576 if (vsi->type != I40E_VSI_MAIN) {
2577 /* Remove vsi from parent's sibling list */
2578 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2579 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2580 return I40E_ERR_PARAM;
2582 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2583 &vsi->sib_vsi_list, list);
2585 /* Remove all switch elements of the VSI */
2586 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2587 if (ret != I40E_SUCCESS)
2588 PMD_DRV_LOG(ERR, "Failed to delete element");
2590 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2592 if (vsi->type != I40E_VSI_SRIOV)
2593 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2596 return I40E_SUCCESS;
2600 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2602 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2603 struct i40e_aqc_remove_macvlan_element_data def_filter;
2604 struct i40e_mac_filter_info filter;
2607 if (vsi->type != I40E_VSI_MAIN)
2608 return I40E_ERR_CONFIG;
2609 memset(&def_filter, 0, sizeof(def_filter));
2610 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2612 def_filter.vlan_tag = 0;
2613 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2614 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2615 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2616 if (ret != I40E_SUCCESS) {
2617 struct i40e_mac_filter *f;
2618 struct ether_addr *mac;
2620 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2622 /* The permanent MAC needs to be added to the mac list */
2623 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2625 PMD_DRV_LOG(ERR, "failed to allocate memory");
2626 return I40E_ERR_NO_MEMORY;
2628 mac = &f->mac_info.mac_addr;
2629 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2631 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2636 (void)rte_memcpy(&filter.mac_addr,
2637 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2638 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2639 return i40e_vsi_add_mac(vsi, &filter);
2643 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2645 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2646 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2647 struct i40e_hw *hw = &vsi->adapter->hw;
2651 memset(&bw_config, 0, sizeof(bw_config));
2652 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2653 if (ret != I40E_SUCCESS) {
2654 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2655 hw->aq.asq_last_status);
2659 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2660 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2661 &ets_sla_config, NULL);
2662 if (ret != I40E_SUCCESS) {
2663 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
2664 "configuration %u", hw->aq.asq_last_status);
2668 /* Do not store the info yet, just print it out */
2669 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2670 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2671 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2672 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2673 ets_sla_config.share_credits[i]);
2674 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2675 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2676 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2677 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2686 i40e_vsi_setup(struct i40e_pf *pf,
2687 enum i40e_vsi_type type,
2688 struct i40e_vsi *uplink_vsi,
2689 uint16_t user_param)
2691 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2692 struct i40e_vsi *vsi;
2693 struct i40e_mac_filter_info filter;
2695 struct i40e_vsi_context ctxt;
2696 struct ether_addr broadcast =
2697 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2699 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2700 PMD_DRV_LOG(ERR, "VSI setup failed, "
2701 "VSI link shouldn't be NULL");
2705 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2706 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2707 "uplink VSI should be NULL");
2711 /* If the uplink VSI has not set up a VEB, create one first */
2712 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2713 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2715 if (NULL == uplink_vsi->veb) {
2716 PMD_DRV_LOG(ERR, "VEB setup failed");
2721 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2723 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2726 TAILQ_INIT(&vsi->mac_list);
2728 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2729 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2730 vsi->parent_vsi = uplink_vsi;
2731 vsi->user_param = user_param;
2732 /* Allocate queues */
2733 switch (vsi->type) {
2734 case I40E_VSI_MAIN :
2735 vsi->nb_qps = pf->lan_nb_qps;
2737 case I40E_VSI_SRIOV :
2738 vsi->nb_qps = pf->vf_nb_qps;
2743 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2745 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2749 vsi->base_queue = ret;
2751 /* VF has MSIX interrupt in VF range, don't allocate here */
2752 if (type != I40E_VSI_SRIOV) {
2753 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2755 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2756 goto fail_queue_alloc;
2758 vsi->msix_intr = ret;
2762 if (type == I40E_VSI_MAIN) {
2763 /* For the main VSI, no need to add it since it is the default one */
2764 vsi->uplink_seid = pf->mac_seid;
2765 vsi->seid = pf->main_vsi_seid;
2766 /* Bind queues to a specific MSIX interrupt */
2768 * At least 2 interrupts are needed: one for misc causes, which is
2769 * enabled from the OS side, and another for the queues, bound to
2770 * the interrupt from the device side only.
2773 /* Get default VSI parameters from hardware */
2774 memset(&ctxt, 0, sizeof(ctxt));
2775 ctxt.seid = vsi->seid;
2776 ctxt.pf_num = hw->pf_id;
2777 ctxt.uplink_seid = vsi->uplink_seid;
2779 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2780 if (ret != I40E_SUCCESS) {
2781 PMD_DRV_LOG(ERR, "Failed to get VSI params");
2782 goto fail_msix_alloc;
2784 (void)rte_memcpy(&vsi->info, &ctxt.info,
2785 sizeof(struct i40e_aqc_vsi_properties_data));
2786 vsi->vsi_id = ctxt.vsi_number;
2787 vsi->info.valid_sections = 0;
2789 /* Configure TC; only TC0 is enabled */
2790 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2792 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
2793 goto fail_msix_alloc;
2796 /* TC, queue mapping */
2797 memset(&ctxt, 0, sizeof(ctxt));
2798 vsi->info.valid_sections |=
2799 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2800 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2801 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2802 (void)rte_memcpy(&ctxt.info, &vsi->info,
2803 sizeof(struct i40e_aqc_vsi_properties_data));
2804 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2805 I40E_DEFAULT_TCMAP);
2806 if (ret != I40E_SUCCESS) {
2807 PMD_DRV_LOG(ERR, "Failed to configure "
2808 "TC queue mapping");
2809 goto fail_msix_alloc;
2811 ctxt.seid = vsi->seid;
2812 ctxt.pf_num = hw->pf_id;
2813 ctxt.uplink_seid = vsi->uplink_seid;
2816 /* Update VSI parameters */
2817 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2818 if (ret != I40E_SUCCESS) {
2819 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2820 goto fail_msix_alloc;
2823 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2824 sizeof(vsi->info.tc_mapping));
2825 (void)rte_memcpy(&vsi->info.queue_mapping,
2826 &ctxt.info.queue_mapping,
2827 sizeof(vsi->info.queue_mapping));
2828 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2829 vsi->info.valid_sections = 0;
2831 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2835 * Updating the default filter settings is necessary to prevent
2836 * reception of tagged packets.
2837 * Some old firmware configurations load a default macvlan
2838 * filter which accepts both tagged and untagged packets.
2839 * The update replaces it with a normal filter if needed.
2840 * For NVM 4.2.2 or later, the update is no longer needed.
2841 * Firmware with correct configurations loads the expected
2842 * default macvlan filter, which cannot be removed.
2844 i40e_update_default_filter_setting(vsi);
2845 } else if (type == I40E_VSI_SRIOV) {
2846 memset(&ctxt, 0, sizeof(ctxt));
2848 * For other VSIs, the uplink_seid equals the uplink VSI's
2849 * uplink_seid since they share the same VEB
2851 vsi->uplink_seid = uplink_vsi->uplink_seid;
2852 ctxt.pf_num = hw->pf_id;
2853 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2854 ctxt.uplink_seid = vsi->uplink_seid;
2855 ctxt.connection_type = 0x1;
2856 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2858 /* Configure switch ID */
2859 ctxt.info.valid_sections |=
2860 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2861 ctxt.info.switch_id =
2862 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2863 /* Configure port/vlan */
2864 ctxt.info.valid_sections |=
2865 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2866 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2867 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2868 I40E_DEFAULT_TCMAP);
2869 if (ret != I40E_SUCCESS) {
2870 PMD_DRV_LOG(ERR, "Failed to configure "
2871 "TC queue mapping");
2872 goto fail_msix_alloc;
2874 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2875 ctxt.info.valid_sections |=
2876 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2878 * Since the VSI is not created yet, only configure the
2879 * parameters; the VSI will be added below.
2883 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
2884 goto fail_msix_alloc;
2887 if (vsi->type != I40E_VSI_MAIN) {
2888 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2890 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
2891 hw->aq.asq_last_status);
2892 goto fail_msix_alloc;
2894 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2895 vsi->info.valid_sections = 0;
2896 vsi->seid = ctxt.seid;
2897 vsi->vsi_id = ctxt.vsi_number;
2898 vsi->sib_vsi_list.vsi = vsi;
2899 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2900 &vsi->sib_vsi_list, list);
2903 /* MAC/VLAN configuration */
2904 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
2905 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2907 ret = i40e_vsi_add_mac(vsi, &filter);
2908 if (ret != I40E_SUCCESS) {
2909 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
2910 goto fail_msix_alloc;
2913 /* Get VSI BW information */
2914 i40e_vsi_dump_bw_config(vsi);
2917 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
2919 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
2925 /* Configure vlan stripping on or off */
2927 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
2929 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2930 struct i40e_vsi_context ctxt;
2932 int ret = I40E_SUCCESS;
2934 /* Check if it is already on or off */
2935 if (vsi->info.valid_sections &
2936 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
2938 if ((vsi->info.port_vlan_flags &
2939 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
2940 return 0; /* already on */
2942 if ((vsi->info.port_vlan_flags &
2943 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2944 I40E_AQ_VSI_PVLAN_EMOD_MASK)
2945 return 0; /* already off */
2950 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2952 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2953 vsi->info.valid_sections =
2954 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2955 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
2956 vsi->info.port_vlan_flags |= vlan_flags;
2957 ctxt.seid = vsi->seid;
2958 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2959 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2961 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2962 on ? "enable" : "disable");
2968 i40e_dev_init_vlan(struct rte_eth_dev *dev)
2970 struct rte_eth_dev_data *data = dev->data;
2973 /* Apply vlan offload setting */
2974 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
2976 /* Apply double-vlan setting, not implemented yet */
2978 /* Apply pvid setting */
2979 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
2980 data->dev_conf.txmode.hw_vlan_insert_pvid);
2982 PMD_DRV_LOG(INFO, "Failed to update VSI params");
2988 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
2990 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2992 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
2996 i40e_update_flow_control(struct i40e_hw *hw)
2998 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
2999 struct i40e_link_status link_status;
3000 uint32_t rxfc = 0, txfc = 0, reg;
3004 memset(&link_status, 0, sizeof(link_status));
3005 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3006 if (ret != I40E_SUCCESS) {
3007 PMD_DRV_LOG(ERR, "Failed to get link status information");
3008 goto write_reg; /* Disable flow control */
3011 an_info = hw->phy.link_info.an_info;
3012 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3013 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3014 ret = I40E_ERR_NOT_READY;
3015 goto write_reg; /* Disable flow control */
3018 * If link auto negotiation is enabled, flow control needs to
3019 * be configured according to it
3021 switch (an_info & I40E_LINK_PAUSE_RXTX) {
3022 case I40E_LINK_PAUSE_RXTX:
3025 hw->fc.current_mode = I40E_FC_FULL;
3027 case I40E_AQ_LINK_PAUSE_RX:
3029 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3031 case I40E_AQ_LINK_PAUSE_TX:
3033 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3036 hw->fc.current_mode = I40E_FC_NONE;
3041 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3042 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3043 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3044 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3045 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3046 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3053 i40e_pf_setup(struct i40e_pf *pf)
3055 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3056 struct i40e_filter_control_settings settings;
3057 struct rte_eth_dev_data *dev_data = pf->dev_data;
3058 struct i40e_vsi *vsi;
3061 /* Clear all stats counters */
3062 pf->offset_loaded = FALSE;
3063 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3064 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3066 ret = i40e_pf_get_switch_config(pf);
3067 if (ret != I40E_SUCCESS) {
3068 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3073 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3075 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3076 return I40E_ERR_NOT_READY;
3079 dev_data->nb_rx_queues = vsi->nb_qps;
3080 dev_data->nb_tx_queues = vsi->nb_qps;
3082 /* Configure filter control */
3083 memset(&settings, 0, sizeof(settings));
3084 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3085 /* Enable ethtype and macvlan filters */
3086 settings.enable_ethtype = TRUE;
3087 settings.enable_macvlan = TRUE;
3088 ret = i40e_set_filter_control(hw, &settings);
3090 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3093 /* Update flow control according to the auto negotiation */
3094 i40e_update_flow_control(hw);
3096 return I40E_SUCCESS;
3100 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3106 * Set or clear TX Queue Disable flags,
3107 * which are required by the hardware.
3109 i40e_pre_tx_queue_cfg(hw, q_idx, on);
3110 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3112 /* Wait until the request is finished */
3113 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3114 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3115 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3116 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3117 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3123 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3124 return I40E_SUCCESS; /* already on, skip next steps */
3126 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3127 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3129 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3130 return I40E_SUCCESS; /* already off, skip next steps */
3131 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3133 /* Write the register */
3134 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3135 /* Check the result */
3136 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3137 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3138 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3140 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3141 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3144 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3145 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3149 /* Check whether it timed out */
3150 if (j >= I40E_CHK_Q_ENA_COUNT) {
3151 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3152 (on ? "enable" : "disable"), q_idx);
3153 return I40E_ERR_TIMEOUT;
3156 return I40E_SUCCESS;
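/*
 * Illustrative sketch (not part of the driver): both queue-switch helpers
 * follow the same pattern - write the REQ bit, then poll until the STAT
 * bit agrees with it or the retry budget runs out. The generic shape, with
 * a hypothetical read_req()/read_stat() pair standing in for the register
 * reads above, is:
 */
static inline int
i40e_poll_req_stat_sketch(int (*read_req)(void), int (*read_stat)(void))
{
	int j;

	for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
		if (read_req() == read_stat())	/* hardware caught up with the request */
			return I40E_SUCCESS;
	}
	return I40E_ERR_TIMEOUT;		/* same error reported by the code above */
}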
3159 /* Switch the TX queues on or off */
3161 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3163 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3164 struct i40e_tx_queue *txq;
3165 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3169 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3170 txq = dev_data->tx_queues[i];
3171 /* Don't operate the queue if it is not configured, or
3172 * if it is marked for deferred (per-queue) start */
3173 if (!txq->q_set || (on && txq->tx_deferred_start))
3176 ret = i40e_dev_tx_queue_start(dev, i);
3178 ret = i40e_dev_tx_queue_stop(dev, i);
3179 if ( ret != I40E_SUCCESS)
3183 return I40E_SUCCESS;
3187 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3192 /* Wait until the request is finished */
3193 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3194 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3195 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3196 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3197 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3202 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3203 return I40E_SUCCESS; /* Already on, skip next steps */
3204 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3206 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3207 return I40E_SUCCESS; /* Already off, skip next steps */
3208 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3211 /* Write the register */
3212 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3213 /* Check the result */
3214 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3215 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3216 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3218 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3219 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3222 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3223 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3228 /* Check whether it timed out */
3229 if (j >= I40E_CHK_Q_ENA_COUNT) {
3230 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3231 (on ? "enable" : "disable"), q_idx);
3232 return I40E_ERR_TIMEOUT;
3235 return I40E_SUCCESS;
3237 /* Switch on or off the rx queues */
3239 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3241 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3242 struct i40e_rx_queue *rxq;
3243 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3247 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3248 rxq = dev_data->rx_queues[i];
3249 /* Don't operate the queue if it is not configured, or
3250 * if it is marked for deferred (per-queue) start */
3251 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3254 ret = i40e_dev_rx_queue_start(dev, i);
3256 ret = i40e_dev_rx_queue_stop(dev, i);
3257 if (ret != I40E_SUCCESS)
3261 return I40E_SUCCESS;
3264 /* Switch on or off all the rx/tx queues */
3266 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3271 /* enable rx queues before enabling tx queues */
3272 ret = i40e_vsi_switch_rx_queues(vsi, on);
3274 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3277 ret = i40e_vsi_switch_tx_queues(vsi, on);
3279 /* Stop tx queues before stopping rx queues */
3280 ret = i40e_vsi_switch_tx_queues(vsi, on);
3282 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3285 ret = i40e_vsi_switch_rx_queues(vsi, on);
3291 /* Initialize VSI for TX */
3293 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3295 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3296 struct rte_eth_dev_data *data = pf->dev_data;
3298 uint32_t ret = I40E_SUCCESS;
3300 for (i = 0; i < data->nb_tx_queues; i++) {
3301 ret = i40e_tx_queue_init(data->tx_queues[i]);
3302 if (ret != I40E_SUCCESS)
3309 /* Initialize VSI for RX */
3311 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3313 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3314 struct rte_eth_dev_data *data = pf->dev_data;
3315 int ret = I40E_SUCCESS;
3318 i40e_pf_config_mq_rx(pf);
3319 for (i = 0; i < data->nb_rx_queues; i++) {
3320 ret = i40e_rx_queue_init(data->rx_queues[i]);
3321 if (ret != I40E_SUCCESS) {
3322 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3331 /* Initialize VSI */
3333 i40e_vsi_init(struct i40e_vsi *vsi)
3337 err = i40e_vsi_tx_init(vsi);
3339 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3342 err = i40e_vsi_rx_init(vsi);
3344 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3352 i40e_stat_update_32(struct i40e_hw *hw,
3360 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3364 if (new_data >= *offset)
3365 *stat = (uint64_t)(new_data - *offset);
3367 *stat = (uint64_t)((new_data +
3368 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
3372 i40e_stat_update_48(struct i40e_hw *hw,
3381 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3382 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3383 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3388 if (new_data >= *offset)
3389 *stat = new_data - *offset;
3391 *stat = (uint64_t)((new_data +
3392 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3394 *stat &= I40E_48_BIT_MASK;
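/*
 * Illustrative sketch (not part of the driver): the 48-bit statistic
 * counters wrap, so when the new reading is smaller than the stored offset
 * the code above adds 2^48 before subtracting. For example an offset of
 * 0xFFFFFFFFFFF0 and a new reading of 0x10 yields a delta of 0x20.
 */
static inline uint64_t
i40e_delta48_sketch(uint64_t new_data, uint64_t offset)
{
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else	/* counter wrapped around its 48-bit range */
		stat = (new_data + ((uint64_t)1 << I40E_48_BIT_SHIFT)) - offset;
	return stat & I40E_48_BIT_MASK;	/* e.g. 0xFFFFFFFFFFF0 -> 0x10 gives 0x20 */
}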
3399 i40e_pf_disable_irq0(struct i40e_hw *hw)
3401 /* Disable all interrupt types */
3402 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3403 I40E_WRITE_FLUSH(hw);
3408 i40e_pf_enable_irq0(struct i40e_hw *hw)
3410 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3411 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3412 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3413 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3414 I40E_WRITE_FLUSH(hw);
3418 i40e_pf_config_irq0(struct i40e_hw *hw)
3422 /* Read pending requests and disable interrupts first */
3423 i40e_pf_disable_irq0(hw);
3425 * Enable all interrupt error options to detect possible errors,
3426 * other informational interrupts are ignored
3428 enable = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3429 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3430 I40E_PFINT_ICR0_ENA_GRST_MASK |
3431 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3432 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
3433 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3434 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3435 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3437 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3438 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3439 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3441 /* Link no queues with irq0 */
3442 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3443 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3447 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3449 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3450 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3453 uint32_t index, offset, val;
3458 * Try to find which VF triggered a reset; use the absolute VF id
3459 * since the register is a global register.
3461 for (i = 0; i < pf->vf_num; i++) {
3462 abs_vf_id = hw->func_caps.vf_base_id + i;
3463 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3464 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3465 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3466 /* A VFR event occurred */
3467 if (val & (0x1 << offset)) {
3470 /* Clear the event first */
3471 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3473 PMD_DRV_LOG(INFO, "VF %u reset occured", abs_vf_id);
3475 * Only notify that a VF reset event occurred,
3476 * don't trigger another SW reset
3478 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3479 if (ret != I40E_SUCCESS)
3480 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3486 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3488 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3489 struct i40e_arq_event_info info;
3490 uint16_t pending, opcode;
3493 info.buf_len = I40E_AQ_BUF_SZ;
3494 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3495 if (!info.msg_buf) {
3496 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3502 ret = i40e_clean_arq_element(hw, &info, &pending);
3504 if (ret != I40E_SUCCESS) {
3505 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3506 "aq_err: %u", hw->aq.asq_last_status);
3509 opcode = rte_le_to_cpu_16(info.desc.opcode);
3512 case i40e_aqc_opc_send_msg_to_pf:
3513 /* Refer to i40e_aq_send_msg_to_pf() for the argument layout */
3514 i40e_pf_host_handle_vf_msg(dev,
3515 rte_le_to_cpu_16(info.desc.retval),
3516 rte_le_to_cpu_32(info.desc.cookie_high),
3517 rte_le_to_cpu_32(info.desc.cookie_low),
3522 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3527 rte_free(info.msg_buf);
3531 * Interrupt handler triggered by NIC for handling
3532 * specific interrupt.
3535 * Pointer to interrupt handle.
3537 * The address of the parameter (struct rte_eth_dev *) registered before.
3543 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3546 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3547 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3548 uint32_t cause, enable;
3550 i40e_pf_disable_irq0(hw);
3552 cause = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3553 enable = I40E_READ_REG(hw, I40E_PFINT_ICR0_ENA);
3555 /* Shared IRQ case, return */
3556 if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3557 PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
3558 "no INT event to process", hw->pf_id);
3562 if (cause & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3563 PMD_DRV_LOG(INFO, "INT:Link status changed");
3564 i40e_dev_link_update(dev, 0);
3567 if (cause & I40E_PFINT_ICR0_ECC_ERR_MASK)
3568 PMD_DRV_LOG(INFO, "INT:Unrecoverable ECC Error");
3570 if (cause & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3571 PMD_DRV_LOG(INFO, "INT:Malicious programming detected");
3573 if (cause & I40E_PFINT_ICR0_GRST_MASK)
3574 PMD_DRV_LOG(INFO, "INT:Global Resets Requested");
3576 if (cause & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3577 PMD_DRV_LOG(INFO, "INT:PCI EXCEPTION occured");
3579 if (cause & I40E_PFINT_ICR0_HMC_ERR_MASK)
3580 PMD_DRV_LOG(INFO, "INT:HMC error occured");
3582 /* Add processing function to deal with a VF reset event */
3583 if (cause & I40E_PFINT_ICR0_VFLR_MASK) {
3584 PMD_DRV_LOG(INFO, "INT:VF reset detected");
3585 i40e_dev_handle_vfr_event(dev);
3587 /* Find admin queue event */
3588 if (cause & I40E_PFINT_ICR0_ADMINQ_MASK) {
3589 PMD_DRV_LOG(INFO, "INT:ADMINQ event");
3590 i40e_dev_handle_aq_msg(dev);
3594 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, enable);
3595 /* Re-enable interrupt from device side */
3596 i40e_pf_enable_irq0(hw);
3597 /* Re-enable interrupt from host side */
3598 rte_intr_enable(&(dev->pci_dev->intr_handle));
3602 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3603 struct i40e_macvlan_filter *filter,
3606 int ele_num, ele_buff_size;
3607 int num, actual_num, i;
3609 int ret = I40E_SUCCESS;
3610 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3611 struct i40e_aqc_add_macvlan_element_data *req_list;
3613 if (filter == NULL || total == 0)
3614 return I40E_ERR_PARAM;
3615 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3616 ele_buff_size = hw->aq.asq_buf_size;
3618 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3619 if (req_list == NULL) {
3620 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3621 return I40E_ERR_NO_MEMORY;
3626 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3627 memset(req_list, 0, ele_buff_size);
3629 for (i = 0; i < actual_num; i++) {
3630 (void)rte_memcpy(req_list[i].mac_addr,
3631 &filter[num + i].macaddr, ETH_ADDR_LEN);
3632 req_list[i].vlan_tag =
3633 rte_cpu_to_le_16(filter[num + i].vlan_id);
3635 switch (filter[num + i].filter_type) {
3636 case RTE_MAC_PERFECT_MATCH:
3637 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
3638 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3640 case RTE_MACVLAN_PERFECT_MATCH:
3641 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3643 case RTE_MAC_HASH_MATCH:
3644 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
3645 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3647 case RTE_MACVLAN_HASH_MATCH:
3648 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
3651 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
3652 ret = I40E_ERR_PARAM;
3656 req_list[i].queue_number = 0;
3658 req_list[i].flags = rte_cpu_to_le_16(flags);
3661 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3663 if (ret != I40E_SUCCESS) {
3664 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3668 } while (num < total);
3676 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3677 struct i40e_macvlan_filter *filter,
3680 int ele_num, ele_buff_size;
3681 int num, actual_num, i;
3683 int ret = I40E_SUCCESS;
3684 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3685 struct i40e_aqc_remove_macvlan_element_data *req_list;
3687 if (filter == NULL || total == 0)
3688 return I40E_ERR_PARAM;
3690 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3691 ele_buff_size = hw->aq.asq_buf_size;
3693 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3694 if (req_list == NULL) {
3695 PMD_DRV_LOG(ERR, "Fail to allocate memory");
3696 return I40E_ERR_NO_MEMORY;
3701 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3702 memset(req_list, 0, ele_buff_size);
3704 for (i = 0; i < actual_num; i++) {
3705 (void)rte_memcpy(req_list[i].mac_addr,
3706 &filter[num + i].macaddr, ETH_ADDR_LEN);
3707 req_list[i].vlan_tag =
3708 rte_cpu_to_le_16(filter[num + i].vlan_id);
3710 switch (filter[num + i].filter_type) {
3711 case RTE_MAC_PERFECT_MATCH:
3712 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3713 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3715 case RTE_MACVLAN_PERFECT_MATCH:
3716 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3718 case RTE_MAC_HASH_MATCH:
3719 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
3720 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3722 case RTE_MACVLAN_HASH_MATCH:
3723 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
3726 PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
3727 ret = I40E_ERR_PARAM;
3730 req_list[i].flags = rte_cpu_to_le_16(flags);
3733 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3735 if (ret != I40E_SUCCESS) {
3736 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
3740 } while (num < total);
3747 /* Find a specific MAC filter */
3748 static struct i40e_mac_filter *
3749 i40e_find_mac_filter(struct i40e_vsi *vsi,
3750 struct ether_addr *macaddr)
3752 struct i40e_mac_filter *f;
3754 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3755 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
3763 i40e_find_vlan_filter(struct i40e_vsi *vsi,
3766 uint32_t vid_idx, vid_bit;
3768 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3769 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3771 if (vsi->vfta[vid_idx] & vid_bit)
3778 i40e_set_vlan_filter(struct i40e_vsi *vsi,
3779 uint16_t vlan_id, bool on)
3781 uint32_t vid_idx, vid_bit;
3783 #define UINT32_BIT_MASK 0x1F
3784 #define VALID_VLAN_BIT_MASK 0xFFF
3785 /* VFTA is an array of 32-bit elements, each holding 32 VLAN bits. Find the
3786 * element first, then find the bit within that element
3788 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
3790 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
3793 vsi->vfta[vid_idx] |= vid_bit;
3795 vsi->vfta[vid_idx] &= ~vid_bit;
3799 * Find all VLAN options for a specific MAC address,
3800 * and return the number of VLANs actually found.
3803 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
3804 struct i40e_macvlan_filter *mv_f,
3805 int num, struct ether_addr *addr)
3811 * i40e_find_vlan_filter is not used here in order to reduce the loop time,
3812 * although the code looks more complex.
3814 if (num < vsi->vlan_num)
3815 return I40E_ERR_PARAM;
3818 for (j = 0; j < I40E_VFTA_SIZE; j++) {
3820 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
3821 if (vsi->vfta[j] & (1 << k)) {
3823 PMD_DRV_LOG(ERR, "vlan number "
3825 return I40E_ERR_PARAM;
3827 (void)rte_memcpy(&mv_f[i].macaddr,
3828 addr, ETH_ADDR_LEN);
3830 j * I40E_UINT32_BIT_SIZE + k;
3836 return I40E_SUCCESS;
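/*
 * Illustrative sketch (not part of the driver): the scan above recovers a
 * VLAN id from its position in the VFTA bit array as
 * vlan_id = element_index * 32 + bit_index, the inverse of the mapping in
 * i40e_set_vlan_filter(). Bit 4 of vfta[3] therefore means VLAN 100.
 */
static inline uint16_t
i40e_vfta_to_vlan_sketch(uint32_t element_index, uint32_t bit_index)
{
	/* e.g. element_index = 3, bit_index = 4 -> VLAN 100 */
	return (uint16_t)(element_index * I40E_UINT32_BIT_SIZE + bit_index);
}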
3840 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
3841 struct i40e_macvlan_filter *mv_f,
3846 struct i40e_mac_filter *f;
3848 if (num < vsi->mac_num)
3849 return I40E_ERR_PARAM;
3851 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3853 PMD_DRV_LOG(ERR, "buffer number not match");
3854 return I40E_ERR_PARAM;
3856 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
3858 mv_f[i].vlan_id = vlan;
3859 mv_f[i].filter_type = f->mac_info.filter_type;
3863 return I40E_SUCCESS;
3867 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
3870 struct i40e_mac_filter *f;
3871 struct i40e_macvlan_filter *mv_f;
3872 int ret = I40E_SUCCESS;
3874 if (vsi == NULL || vsi->mac_num == 0)
3875 return I40E_ERR_PARAM;
3877 /* Case where no VLAN is set */
3878 if (vsi->vlan_num == 0)
3881 num = vsi->mac_num * vsi->vlan_num;
3883 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
3885 PMD_DRV_LOG(ERR, "failed to allocate memory");
3886 return I40E_ERR_NO_MEMORY;
3890 if (vsi->vlan_num == 0) {
3891 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3892 (void)rte_memcpy(&mv_f[i].macaddr,
3893 &f->mac_info.mac_addr, ETH_ADDR_LEN);
3894 mv_f[i].vlan_id = 0;
3898 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3899 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
3900 vsi->vlan_num, &f->mac_info.mac_addr);
3901 if (ret != I40E_SUCCESS)
3907 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
3915 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3917 struct i40e_macvlan_filter *mv_f;
3919 int ret = I40E_SUCCESS;
3921 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
3922 return I40E_ERR_PARAM;
3924 /* If it's already set, just return */
3925 if (i40e_find_vlan_filter(vsi,vlan))
3926 return I40E_SUCCESS;
3928 mac_num = vsi->mac_num;
3931 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3932 return I40E_ERR_PARAM;
3935 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3938 PMD_DRV_LOG(ERR, "failed to allocate memory");
3939 return I40E_ERR_NO_MEMORY;
3942 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3944 if (ret != I40E_SUCCESS)
3947 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
3949 if (ret != I40E_SUCCESS)
3952 i40e_set_vlan_filter(vsi, vlan, 1);
3962 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
3964 struct i40e_macvlan_filter *mv_f;
3966 int ret = I40E_SUCCESS;
3969 * Vlan 0 is the generic filter for untagged packets
3970 * and can't be removed.
3972 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
3973 return I40E_ERR_PARAM;
3975 /* If it can't be found, just return */
3976 if (!i40e_find_vlan_filter(vsi, vlan))
3977 return I40E_ERR_PARAM;
3979 mac_num = vsi->mac_num;
3982 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
3983 return I40E_ERR_PARAM;
3986 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
3989 PMD_DRV_LOG(ERR, "failed to allocate memory");
3990 return I40E_ERR_NO_MEMORY;
3993 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
3995 if (ret != I40E_SUCCESS)
3998 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4000 if (ret != I40E_SUCCESS)
4003 /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
4004 if (vsi->vlan_num == 1) {
4005 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4006 if (ret != I40E_SUCCESS)
4009 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4010 if (ret != I40E_SUCCESS)
4014 i40e_set_vlan_filter(vsi, vlan, 0);
4024 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4026 struct i40e_mac_filter *f;
4027 struct i40e_macvlan_filter *mv_f;
4028 int i, vlan_num = 0;
4029 int ret = I40E_SUCCESS;
4031 /* If it's an add and we've already configured it, return */
4032 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4034 return I40E_SUCCESS;
4035 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4036 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4039 * If vlan_num is 0, this is the first time a MAC is added,
4040 * so set the mask for vlan_id 0.
4042 if (vsi->vlan_num == 0) {
4043 i40e_set_vlan_filter(vsi, 0, 1);
4046 vlan_num = vsi->vlan_num;
4047 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4048 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4051 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4053 PMD_DRV_LOG(ERR, "failed to allocate memory");
4054 return I40E_ERR_NO_MEMORY;
4057 for (i = 0; i < vlan_num; i++) {
4058 mv_f[i].filter_type = mac_filter->filter_type;
4059 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4063 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4064 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4065 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4066 &mac_filter->mac_addr);
4067 if (ret != I40E_SUCCESS)
4071 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4072 if (ret != I40E_SUCCESS)
4075 /* Add the mac addr into mac list */
4076 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4078 PMD_DRV_LOG(ERR, "failed to allocate memory");
4079 ret = I40E_ERR_NO_MEMORY;
4082 (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4084 f->mac_info.filter_type = mac_filter->filter_type;
4085 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
4096 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
4098 struct i40e_mac_filter *f;
4099 struct i40e_macvlan_filter *mv_f;
4101 enum rte_mac_filter_type filter_type;
4102 int ret = I40E_SUCCESS;
4104 /* Can't find it, return an error */
4105 f = i40e_find_mac_filter(vsi, addr);
4107 return I40E_ERR_PARAM;
4109 vlan_num = vsi->vlan_num;
4110 filter_type = f->mac_info.filter_type;
4111 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4112 filter_type == RTE_MACVLAN_HASH_MATCH) {
4113 if (vlan_num == 0) {
4114 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
4115 return I40E_ERR_PARAM;
4117 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
4118 filter_type == RTE_MAC_HASH_MATCH)
4121 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4123 PMD_DRV_LOG(ERR, "failed to allocate memory");
4124 return I40E_ERR_NO_MEMORY;
4127 for (i = 0; i < vlan_num; i++) {
4128 mv_f[i].filter_type = filter_type;
4129 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4132 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4133 filter_type == RTE_MACVLAN_HASH_MATCH) {
4134 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
4135 if (ret != I40E_SUCCESS)
4139 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
4140 if (ret != I40E_SUCCESS)
4143 /* Remove the MAC address from the mac list */
4144 TAILQ_REMOVE(&vsi->mac_list, f, next);
4154 /* Configure hash enable flags for RSS */
4156 i40e_config_hena(uint64_t flags)
4163 if (flags & ETH_RSS_NONF_IPV4_UDP)
4164 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4165 if (flags & ETH_RSS_NONF_IPV4_TCP)
4166 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
4167 if (flags & ETH_RSS_NONF_IPV4_SCTP)
4168 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
4169 if (flags & ETH_RSS_NONF_IPV4_OTHER)
4170 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
4171 if (flags & ETH_RSS_FRAG_IPV4)
4172 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
4173 if (flags & ETH_RSS_NONF_IPV6_UDP)
4174 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
4175 if (flags & ETH_RSS_NONF_IPV6_TCP)
4176 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
4177 if (flags & ETH_RSS_NONF_IPV6_SCTP)
4178 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
4179 if (flags & ETH_RSS_NONF_IPV6_OTHER)
4180 hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
4181 if (flags & ETH_RSS_FRAG_IPV6)
4182 hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
4183 if (flags & ETH_RSS_L2_PAYLOAD)
4184 hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
4189 /* Parse the hash enable flags */
4191 i40e_parse_hena(uint64_t flags)
4193 uint64_t rss_hf = 0;
4198 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
4199 rss_hf |= ETH_RSS_NONF_IPV4_UDP;
4200 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
4201 rss_hf |= ETH_RSS_NONF_IPV4_TCP;
4202 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
4203 rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
4204 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
4205 rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
4206 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
4207 rss_hf |= ETH_RSS_FRAG_IPV4;
4208 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
4209 rss_hf |= ETH_RSS_NONF_IPV6_UDP;
4210 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
4211 rss_hf |= ETH_RSS_NONF_IPV6_TCP;
4212 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
4213 rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
4214 if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
4215 rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
4216 if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
4217 rss_hf |= ETH_RSS_FRAG_IPV6;
4218 if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
4219 rss_hf |= ETH_RSS_L2_PAYLOAD;
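/*
 * Illustrative sketch (not part of the driver): i40e_config_hena() and
 * i40e_parse_hena() are inverses over the flags handled above, so a
 * round-trip through the hardware bit positions preserves the flag set.
 * The helper name is hypothetical.
 */
static inline int
i40e_hena_roundtrip_sketch(uint64_t rss_hf)
{
	/* assumes rss_hf contains only flags listed in the two functions above */
	return i40e_parse_hena(i40e_config_hena(rss_hf)) == rss_hf;
}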
4226 i40e_pf_disable_rss(struct i40e_pf *pf)
4228 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4231 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4232 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4233 hena &= ~I40E_RSS_HENA_ALL;
4234 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4235 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4236 I40E_WRITE_FLUSH(hw);
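/*
 * Illustrative sketch (not part of the driver): the HENA value is 64 bits
 * wide but lives in two 32-bit registers, so the code above assembles it as
 * lo | (hi << 32) and writes it back as the low and high halves. Helper
 * names are hypothetical.
 */
static inline uint64_t
i40e_join32_sketch(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

static inline void
i40e_split64_sketch(uint64_t hena, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)hena;		/* written to I40E_PFQF_HENA(0) */
	*hi = (uint32_t)(hena >> 32);	/* written to I40E_PFQF_HENA(1) */
}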
4240 i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
4243 uint8_t hash_key_len;
4248 hash_key = (uint32_t *)(rss_conf->rss_key);
4249 hash_key_len = rss_conf->rss_key_len;
4250 if (hash_key != NULL && hash_key_len >=
4251 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
4252 /* Fill in RSS hash key */
4253 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4254 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
4257 rss_hf = rss_conf->rss_hf;
4258 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4259 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4260 hena &= ~I40E_RSS_HENA_ALL;
4261 hena |= i40e_config_hena(rss_hf);
4262 I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
4263 I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
4264 I40E_WRITE_FLUSH(hw);
4270 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
4271 struct rte_eth_rss_conf *rss_conf)
4273 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4274 uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
4277 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4278 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4279 if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
4280 if (rss_hf != 0) /* Enable RSS */
4282 return 0; /* Nothing to do */
4285 if (rss_hf == 0) /* Disable RSS */
4288 return i40e_hw_rss_hash_set(hw, rss_conf);
4292 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
4293 struct rte_eth_rss_conf *rss_conf)
4295 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4296 uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
4300 if (hash_key != NULL) {
4301 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
4302 hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
4303 rss_conf->rss_key_len = i * sizeof(uint32_t);
4305 hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
4306 hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
4307 rss_conf->rss_hf = i40e_parse_hena(hena);
static int
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
{
	switch (filter_type) {
	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
		break;
	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_IMAC_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
		break;
	case ETH_TUNNEL_FILTER_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
		return -EINVAL;
	}
	return 0;
}
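
/*
 * Note (added commentary): a single
 * i40e_aqc_add_remove_cloud_filters_element_data element is filled in and
 * handed to the admin queue. The same routine serves both add (add != 0) and
 * delete (add == 0); the filter is always attached to the PF's main VSI with
 * the "to queue" flag set, so matching packets are steered to the requested
 * RX queue.
 */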
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			struct rte_eth_tunnel_filter_conf *tunnel_filter,
			uint8_t add)
{
	uint16_t ip_type;
	uint8_t tun_type = 0;
	int val, ret = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;

	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
		0);
	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -EINVAL;
	}
	pfilter = cld_filter;

	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
			sizeof(struct ether_addr));
	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
			sizeof(struct ether_addr));

	pfilter->inner_vlan = tunnel_filter->inner_vlan;
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
		break;
	default:
		/* Other tunnel types are not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
					&pfilter->flags);
	if (val < 0) {
		rte_free(cld_filter);
		return -EINVAL;
	}

	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	pfilter->tenant_id = tunnel_filter->tenant_id;
	pfilter->queue_number = tunnel_filter->queue_id;

	if (add)
		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						cld_filter, 1);

	rte_free(cld_filter);
	return ret;
}
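
/*
 * Note (added commentary): pf->vxlan_ports[] holds the UDP ports currently
 * offloaded for VXLAN and pf->vxlan_bitmap mirrors which slots are in use.
 * A slot containing 0 is free, which is why i40e_get_vxlan_port_idx(pf, 0)
 * is used below to find room for a new port.
 */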
static int
i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
{
	uint8_t i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
			return i;
	}
	return -1;
}

static int
i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
	int idx, ret;
	uint8_t filter_index;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	idx = i40e_get_vxlan_port_idx(pf, port);
	/* Check if port already exists */
	if (idx >= 0) {
		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
		return -EINVAL;
	}
	/* Now check if there is space to add the new port */
	idx = i40e_get_vxlan_port_idx(pf, 0);
	if (idx < 0) {
		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
			"not adding port %d", port);
		return -ENOSPC;
	}
	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
					&filter_index, NULL);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
		return -1;
	}
	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
			port, filter_index);
	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[idx] = port;
	pf->vxlan_bitmap |= (1 << idx);
	if (!(pf->flags & I40E_FLAG_VXLAN))
		pf->flags |= I40E_FLAG_VXLAN;

	return 0;
}

static int
i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
	int idx;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	if (!(pf->flags & I40E_FLAG_VXLAN)) {
		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
		return -EINVAL;
	}
	idx = i40e_get_vxlan_port_idx(pf, port);
	if (idx < 0) {
		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
		return -EINVAL;
	}
	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
		return -1;
	}
	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
			port, idx);
	pf->vxlan_ports[idx] = 0;
	pf->vxlan_bitmap &= ~(1 << idx);
	if (!pf->vxlan_bitmap)
		pf->flags &= ~I40E_FLAG_VXLAN;

	return 0;
}
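
/*
 * Note (added commentary): the two dev_ops below only dispatch on the tunnel
 * protocol type. Only VXLAN is wired up to the UDP tunnel admin queue
 * commands; GENEVE and TEREDO are recognised but rejected as unsupported.
 */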
/* Add UDP tunneling port */
static int
i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;
	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -1;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -1;
		break;
	}

	return ret;
}

/* Remove UDP tunneling port */
static int
i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;
	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -1;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -1;
		break;
	}

	return ret;
}
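
/*
 * Note (added commentary): the RSS lookup table is built from 8-bit entries
 * packed four per I40E_PFQF_HLUT register (hence the write on every fourth
 * iteration and the i >> 2 register index). Queues are assigned round-robin
 * modulo the largest power of two not exceeding nb_rx_queues
 * (i40e_prev_power_of_2), masked to rss_table_entry_width bits.
 */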
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
	uint16_t j, num = i40e_prev_power_of_2(pf->dev_data->nb_rx_queues);

	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40e_pf_disable_rss(pf);
		return 0;
	}
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	return i40e_hw_rss_hash_set(hw, &rss_conf);
}

static int
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
		struct rte_eth_tunnel_filter_conf *filter)
{
	if (pf == NULL || filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}
	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
		return -EINVAL;
	}
	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
		(is_zero_ether_addr(filter->outer_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
		return -EINVAL;
	}
	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
		(is_zero_ether_addr(filter->inner_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
		return -EINVAL;
	}

	return 0;
}
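
/*
 * Note (added commentary): i40e_tunnel_filter_handle() backs the
 * RTE_ETH_FILTER_TUNNEL type of the filter_ctrl dev_op. RTE_ETH_FILTER_NOP
 * acts as a capability probe (it only reports I40E_NOT_SUPPORTED when VXLAN
 * offload has not been enabled), while ADD and DELETE are forwarded to
 * i40e_dev_tunnel_filter_set().
 */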
static int
i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
			void *arg)
{
	struct rte_eth_tunnel_filter_conf *filter;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = I40E_SUCCESS;

	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
		return I40E_ERR_PARAM;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		if (!(pf->flags & I40E_FLAG_VXLAN))
			ret = I40E_NOT_SUPPORTED;
		break; /* do not fall through into the ADD case */
	case RTE_ETH_FILTER_ADD:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = I40E_ERR_PARAM;
		break;
	}

	return ret;
}

static int
i40e_pf_config_mq_rx(struct i40e_pf *pf)
{
	if (!pf->dev_data->sriov.active) {
		switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			i40e_pf_config_rss(pf);
			break;
		default:
			i40e_pf_disable_rss(pf);
			break;
		}
	}

	return 0;
}

static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;
	switch (filter_type) {
	case RTE_ETH_FILTER_MACVLAN:
		ret = i40e_mac_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",