4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
43 #include <rte_string_fns.h>
45 #include <rte_ether.h>
46 #include <rte_ethdev.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_memcpy.h>
50 #include <rte_alarm.h>
52 #include <rte_eth_ctrl.h>
54 #include "i40e_logs.h"
55 #include "i40e/i40e_prototype.h"
56 #include "i40e/i40e_adminq_cmd.h"
57 #include "i40e/i40e_type.h"
58 #include "i40e_ethdev.h"
59 #include "i40e_rxtx.h"
62 #define I40E_DEFAULT_RX_FREE_THRESH 32
63 #define I40E_DEFAULT_RX_PTHRESH 8
64 #define I40E_DEFAULT_RX_HTHRESH 8
65 #define I40E_DEFAULT_RX_WTHRESH 0
67 #define I40E_DEFAULT_TX_FREE_THRESH 32
68 #define I40E_DEFAULT_TX_PTHRESH 32
69 #define I40E_DEFAULT_TX_HTHRESH 0
70 #define I40E_DEFAULT_TX_WTHRESH 0
71 #define I40E_DEFAULT_TX_RSBIT_THRESH 32
73 /* Maximum number of MAC addresses */
74 #define I40E_NUM_MACADDR_MAX 64
75 #define I40E_CLEAR_PXE_WAIT_MS 200
77 /* Maximum number of capability elements */
78 #define I40E_MAX_CAP_ELE_NUM 128
80 /* Wait count and interval */
81 #define I40E_CHK_Q_ENA_COUNT 1000
82 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
84 /* Maximum number of VSIs */
85 #define I40E_MAX_NUM_VSIS (384UL)
87 /* Bit shift and mask */
88 #define I40E_16_BIT_SHIFT 16
89 #define I40E_16_BIT_MASK 0xFFFF
90 #define I40E_32_BIT_SHIFT 32
91 #define I40E_32_BIT_MASK 0xFFFFFFFF
92 #define I40E_48_BIT_SHIFT 48
93 #define I40E_48_BIT_MASK 0xFFFFFFFFFFFFULL
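/*
 * Note: the 48-bit statistics counters are split across a 32-bit "low" and a
 * 16-bit "high" register. A rough sketch of how i40e_stat_update_48() is
 * expected to assemble the full value from the two reads (hi, lo):
 *
 *   value = (((uint64_t)hi & I40E_16_BIT_MASK) << I40E_32_BIT_SHIFT) | lo;
 *   value &= I40E_48_BIT_MASK;
 */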
95 /* Default queue interrupt throttling time in microseconds */
96 #define I40E_ITR_INDEX_DEFAULT 0
97 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
98 #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
100 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
102 /* Mask of PF interrupt causes */
103 #define I40E_PFINT_ICR0_ENA_MASK ( \
104 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
105 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
106 I40E_PFINT_ICR0_ENA_GRST_MASK | \
107 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
108 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
109 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
110 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
111 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
112 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
113 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
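/*
 * These are the "other cause" interrupts delivered on IRQ0: admin queue
 * events, VF resets (VFLR), link state changes and various error conditions.
 * This mask is presumably what i40e_pf_config_irq0() programs into
 * PFINT_ICR0_ENA.
 */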
115 static int eth_i40e_dev_init(\
116 __attribute__((unused)) struct eth_driver *eth_drv,
117 struct rte_eth_dev *eth_dev);
118 static int i40e_dev_configure(struct rte_eth_dev *dev);
119 static int i40e_dev_start(struct rte_eth_dev *dev);
120 static void i40e_dev_stop(struct rte_eth_dev *dev);
121 static void i40e_dev_close(struct rte_eth_dev *dev);
122 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
123 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
124 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
125 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
126 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
127 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
128 static void i40e_dev_stats_get(struct rte_eth_dev *dev,
129 struct rte_eth_stats *stats);
130 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
131 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
135 static void i40e_dev_info_get(struct rte_eth_dev *dev,
136 struct rte_eth_dev_info *dev_info);
137 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
140 static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
141 static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
142 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
145 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
146 static int i40e_dev_led_on(struct rte_eth_dev *dev);
147 static int i40e_dev_led_off(struct rte_eth_dev *dev);
148 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
149 struct rte_eth_fc_conf *fc_conf);
150 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
151 struct rte_eth_pfc_conf *pfc_conf);
152 static void i40e_macaddr_add(struct rte_eth_dev *dev,
153 struct ether_addr *mac_addr,
156 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
157 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
158 struct rte_eth_rss_reta *reta_conf);
159 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
160 struct rte_eth_rss_reta *reta_conf);
162 static int i40e_get_cap(struct i40e_hw *hw);
163 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
164 static int i40e_pf_setup(struct i40e_pf *pf);
165 static int i40e_vsi_init(struct i40e_vsi *vsi);
166 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
167 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
168 bool offset_loaded, uint64_t *offset, uint64_t *stat);
169 static void i40e_stat_update_48(struct i40e_hw *hw,
175 static void i40e_pf_config_irq0(struct i40e_hw *hw);
176 static void i40e_dev_interrupt_handler(
177 __rte_unused struct rte_intr_handle *handle, void *param);
178 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
179 uint32_t base, uint32_t num);
180 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
181 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
183 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
185 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
186 static int i40e_veb_release(struct i40e_veb *veb);
187 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
188 struct i40e_vsi *vsi);
189 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
190 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
191 static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
192 struct i40e_macvlan_filter *mv_f,
194 struct ether_addr *addr);
195 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
196 struct i40e_macvlan_filter *mv_f,
199 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
200 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
201 struct rte_eth_rss_conf *rss_conf);
202 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
203 struct rte_eth_rss_conf *rss_conf);
204 static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
205 struct rte_eth_udp_tunnel *udp_tunnel);
206 static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
207 struct rte_eth_udp_tunnel *udp_tunnel);
208 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
209 enum rte_filter_type filter_type,
210 enum rte_filter_op filter_op,
213 /* Default hash key buffer for RSS */
214 static uint32_t rss_key_default[I40E_PFQF_HKEY_MAX_INDEX + 1];
216 static struct rte_pci_id pci_id_i40e_map[] = {
217 #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
218 #include "rte_pci_dev_ids.h"
219 { .vendor_id = 0, /* sentinel */ },
222 static struct eth_dev_ops i40e_eth_dev_ops = {
223 .dev_configure = i40e_dev_configure,
224 .dev_start = i40e_dev_start,
225 .dev_stop = i40e_dev_stop,
226 .dev_close = i40e_dev_close,
227 .promiscuous_enable = i40e_dev_promiscuous_enable,
228 .promiscuous_disable = i40e_dev_promiscuous_disable,
229 .allmulticast_enable = i40e_dev_allmulticast_enable,
230 .allmulticast_disable = i40e_dev_allmulticast_disable,
231 .dev_set_link_up = i40e_dev_set_link_up,
232 .dev_set_link_down = i40e_dev_set_link_down,
233 .link_update = i40e_dev_link_update,
234 .stats_get = i40e_dev_stats_get,
235 .stats_reset = i40e_dev_stats_reset,
236 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
237 .dev_infos_get = i40e_dev_info_get,
238 .vlan_filter_set = i40e_vlan_filter_set,
239 .vlan_tpid_set = i40e_vlan_tpid_set,
240 .vlan_offload_set = i40e_vlan_offload_set,
241 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
242 .vlan_pvid_set = i40e_vlan_pvid_set,
243 .rx_queue_start = i40e_dev_rx_queue_start,
244 .rx_queue_stop = i40e_dev_rx_queue_stop,
245 .tx_queue_start = i40e_dev_tx_queue_start,
246 .tx_queue_stop = i40e_dev_tx_queue_stop,
247 .rx_queue_setup = i40e_dev_rx_queue_setup,
248 .rx_queue_release = i40e_dev_rx_queue_release,
249 .rx_queue_count = i40e_dev_rx_queue_count,
250 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
251 .tx_queue_setup = i40e_dev_tx_queue_setup,
252 .tx_queue_release = i40e_dev_tx_queue_release,
253 .dev_led_on = i40e_dev_led_on,
254 .dev_led_off = i40e_dev_led_off,
255 .flow_ctrl_set = i40e_flow_ctrl_set,
256 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
257 .mac_addr_add = i40e_macaddr_add,
258 .mac_addr_remove = i40e_macaddr_remove,
259 .reta_update = i40e_dev_rss_reta_update,
260 .reta_query = i40e_dev_rss_reta_query,
261 .rss_hash_update = i40e_dev_rss_hash_update,
262 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
263 .udp_tunnel_add = i40e_dev_udp_tunnel_add,
264 .udp_tunnel_del = i40e_dev_udp_tunnel_del,
265 .filter_ctrl = i40e_dev_filter_ctrl,
268 static struct eth_driver rte_i40e_pmd = {
270 .name = "rte_i40e_pmd",
271 .id_table = pci_id_i40e_map,
272 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
274 .eth_dev_init = eth_i40e_dev_init,
275 .dev_private_size = sizeof(struct i40e_adapter),
279 i40e_align_floor(int n)
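/*
 * Returns the largest power of two that is <= n, e.g.
 * i40e_align_floor(6) == 4 and i40e_align_floor(8) == 8. Used to round
 * queue counts down to a power of two (see i40e_pf_parameter_init()).
 */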
283 return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
287 rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
288 struct rte_eth_link *link)
290 struct rte_eth_link *dst = link;
291 struct rte_eth_link *src = &(dev->data->dev_link);
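/*
 * struct rte_eth_link is expected to fit into 64 bits here, so a single
 * 64-bit compare-and-set copies the whole link status atomically; if the
 * cmpset does not take effect, the copy is reported as failed to the caller.
 */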
293 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
294 *(uint64_t *)src) == 0)
301 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
302 struct rte_eth_link *link)
304 struct rte_eth_link *dst = &(dev->data->dev_link);
305 struct rte_eth_link *src = link;
307 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
308 *(uint64_t *)src) == 0)
315 * Driver initialization routine.
316 * Invoked once at EAL init time.
317 * Registers itself as the [Poll Mode] Driver of PCI i40e devices.
320 rte_i40e_pmd_init(const char *name __rte_unused,
321 const char *params __rte_unused)
323 PMD_INIT_FUNC_TRACE();
324 rte_eth_driver_register(&rte_i40e_pmd);
329 static struct rte_driver rte_i40e_driver = {
331 .init = rte_i40e_pmd_init,
334 PMD_REGISTER_DRIVER(rte_i40e_driver);
337 eth_i40e_dev_init(__rte_unused struct eth_driver *eth_drv,
338 struct rte_eth_dev *dev)
340 struct rte_pci_device *pci_dev;
341 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
342 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
343 struct i40e_vsi *vsi;
348 PMD_INIT_FUNC_TRACE();
350 dev->dev_ops = &i40e_eth_dev_ops;
351 dev->rx_pkt_burst = i40e_recv_pkts;
352 dev->tx_pkt_burst = i40e_xmit_pkts;
354 /* For secondary processes, we don't initialise any further, as the primary
355 * has already done this work. Only check whether we need a different RX function. */
357 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
358 if (dev->data->scattered_rx)
359 dev->rx_pkt_burst = i40e_recv_scattered_pkts;
362 pci_dev = dev->pci_dev;
363 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
364 pf->adapter->eth_dev = dev;
365 pf->dev_data = dev->data;
367 hw->back = I40E_PF_TO_ADAPTER(pf);
368 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
370 PMD_INIT_LOG(ERR, "Hardware is not available, "
371 "as address is NULL");
375 hw->vendor_id = pci_dev->id.vendor_id;
376 hw->device_id = pci_dev->id.device_id;
377 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
378 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
379 hw->bus.device = pci_dev->addr.devid;
380 hw->bus.func = pci_dev->addr.function;
382 /* Make sure all is clean before doing PF reset */
385 /* Reset here to make sure all is clean for each PF */
386 ret = i40e_pf_reset(hw);
388 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
392 /* Initialize the shared code (base driver) */
393 ret = i40e_init_shared_code(hw);
395 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
399 /* Initialize the parameters for adminq */
400 i40e_init_adminq_parameter(hw);
401 ret = i40e_init_adminq(hw);
402 if (ret != I40E_SUCCESS) {
403 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
406 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
407 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
408 hw->aq.api_maj_ver, hw->aq.api_min_ver,
409 ((hw->nvm.version >> 12) & 0xf),
410 ((hw->nvm.version >> 4) & 0xff),
411 (hw->nvm.version & 0xf), hw->nvm.eetrack);
414 ret = i40e_aq_stop_lldp(hw, true, NULL);
415 if (ret != I40E_SUCCESS) /* Its failure can be ignored */
416 PMD_INIT_LOG(INFO, "Failed to stop lldp");
419 i40e_clear_pxe_mode(hw);
421 /* Get hw capabilities */
422 ret = i40e_get_cap(hw);
423 if (ret != I40E_SUCCESS) {
424 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
425 goto err_get_capabilities;
428 /* Initialize parameters for PF */
429 ret = i40e_pf_parameter_init(dev);
431 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
432 goto err_parameter_init;
435 /* Initialize the queue management */
436 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
438 PMD_INIT_LOG(ERR, "Failed to init queue pool");
439 goto err_qp_pool_init;
441 ret = i40e_res_pool_init(&pf->msix_pool, 1,
442 hw->func_caps.num_msix_vectors - 1);
444 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
445 goto err_msix_pool_init;
448 /* Initialize lan hmc */
449 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
450 hw->func_caps.num_rx_qp, 0, 0);
451 if (ret != I40E_SUCCESS) {
452 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
453 goto err_init_lan_hmc;
456 /* Configure lan hmc */
457 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
458 if (ret != I40E_SUCCESS) {
459 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
460 goto err_configure_lan_hmc;
463 /* Get and check the mac address */
464 i40e_get_mac_addr(hw, hw->mac.addr);
465 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
466 PMD_INIT_LOG(ERR, "mac address is not valid");
468 goto err_get_mac_addr;
470 /* Copy the permanent MAC address */
471 ether_addr_copy((struct ether_addr *) hw->mac.addr,
472 (struct ether_addr *) hw->mac.perm_addr);
474 /* Disable flow control */
475 hw->fc.requested_mode = I40E_FC_NONE;
476 i40e_set_fc(hw, &aq_fail, TRUE);
478 /* PF setup, which includes VSI setup */
479 ret = i40e_pf_setup(pf);
481 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
482 goto err_setup_pf_switch;
487 /* Disable double vlan by default */
488 i40e_vsi_config_double_vlan(vsi, FALSE);
490 if (!vsi->max_macaddrs)
491 len = ETHER_ADDR_LEN;
493 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
495 /* Must be done after the VSI is initialized */
496 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
497 if (!dev->data->mac_addrs) {
498 PMD_INIT_LOG(ERR, "Failed to allocate memory "
499 "for storing MAC addresses");
502 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
503 &dev->data->mac_addrs[0]);
505 /* initialize pf host driver to setup SRIOV resource if applicable */
506 i40e_pf_host_init(dev);
508 /* register callback func to eal lib */
509 rte_intr_callback_register(&(pci_dev->intr_handle),
510 i40e_dev_interrupt_handler, (void *)dev);
512 /* configure and enable device interrupt */
513 i40e_pf_config_irq0(hw);
514 i40e_pf_enable_irq0(hw);
516 /* enable uio intr after callback register */
517 rte_intr_enable(&(pci_dev->intr_handle));
522 i40e_vsi_release(pf->main_vsi);
525 err_configure_lan_hmc:
526 (void)i40e_shutdown_lan_hmc(hw);
528 i40e_res_pool_destroy(&pf->msix_pool);
530 i40e_res_pool_destroy(&pf->qp_pool);
533 err_get_capabilities:
534 (void)i40e_shutdown_adminq(hw);
540 i40e_dev_configure(struct rte_eth_dev *dev)
543 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
546 * VMDQ setup needs to move out of i40e_pf_config_mq_rx(), as VMDQ and
547 * RSS have different configuration requirements.
548 * The general PMD call sequence is NIC init, configure,
549 * rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up the
550 * VSI that a given queue belongs to when VMDQ is applicable, so the
551 * VMDQ setup has to be done before rx/tx_queue_setup(); this function
552 * is therefore a good place for vmdq_setup().
553 * RSS setup needs the actual number of configured RX queues, which is
554 * only known after rx_queue_setup(), so dev_start() is a good place
555 * for the RSS setup (see the illustrative ordering below).
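 *
 * Illustrative application-side ordering (a sketch using the generic
 * ethdev API names, not code from this file):
 *   rte_eth_dev_configure()                          -> i40e_dev_configure()
 *   rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup()
 *   rte_eth_dev_start()                              -> i40e_dev_start()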
557 if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
558 ret = i40e_vmdq_setup(dev);
563 return i40e_dev_init_vlan(dev);
567 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
569 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
570 uint16_t msix_vect = vsi->msix_intr;
573 for (i = 0; i < vsi->nb_qps; i++) {
574 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
575 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
579 if (vsi->type != I40E_VSI_SRIOV) {
580 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
581 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
585 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
586 vsi->user_param + (msix_vect - 1);
588 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
590 I40E_WRITE_FLUSH(hw);
593 static inline uint16_t
594 i40e_calc_itr_interval(int16_t interval)
596 if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
597 interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
599 /* Convert to hardware count, as writing each 1 represents 2 us */
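/*
 * e.g. the default interval of 32 us (I40E_QUEUE_ITR_INTERVAL_DEFAULT)
 * would be written as 32 / 2 = 16 hardware units.
 */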
604 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
607 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
608 uint16_t msix_vect = vsi->msix_intr;
611 for (i = 0; i < vsi->nb_qps; i++)
612 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
614 /* Bind all RX queues to allocated MSIX interrupt */
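/*
 * The VSI's RX queues are chained into a hardware linked list: each queue's
 * RQCTL "next queue" field points at the following queue, the last entry is
 * marked with I40E_QINT_RQCTL_NEXTQ_INDX_MASK, and the head of the list is
 * written to PFINT_LNKLSTN (or VPINT_LNKLSTN for an SR-IOV VSI) below.
 */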
615 for (i = 0; i < vsi->nb_qps; i++) {
616 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
617 I40E_QINT_RQCTL_ITR_INDX_MASK |
618 ((vsi->base_queue + i + 1) <<
619 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
620 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
621 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
623 if (i == vsi->nb_qps - 1)
624 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
625 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
628 /* Write first RX queue to Link list register as the head element */
629 if (vsi->type != I40E_VSI_SRIOV) {
631 i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
633 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
635 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
636 (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
638 I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
639 msix_vect - 1), interval);
641 #ifndef I40E_GLINT_CTL
642 #define I40E_GLINT_CTL 0x0003F800
643 #define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
645 /* Disable auto-masking when enabling all non-zero interrupts */
646 I40E_WRITE_REG(hw, I40E_GLINT_CTL,
647 I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
651 /* num_msix_vectors_vf needs to exclude IRQ0 */
652 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
653 vsi->user_param + (msix_vect - 1);
655 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
656 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
657 (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
660 I40E_WRITE_FLUSH(hw);
664 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
666 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
667 uint16_t interval = i40e_calc_itr_interval(\
668 RTE_LIBRTE_I40E_ITR_INTERVAL);
670 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
671 I40E_PFINT_DYN_CTLN_INTENA_MASK |
672 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
673 (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
674 (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
678 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
680 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
682 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
685 static inline uint8_t
686 i40e_parse_link_speed(uint16_t eth_link_speed)
688 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
690 switch (eth_link_speed) {
691 case ETH_LINK_SPEED_40G:
692 link_speed = I40E_LINK_SPEED_40GB;
694 case ETH_LINK_SPEED_20G:
695 link_speed = I40E_LINK_SPEED_20GB;
697 case ETH_LINK_SPEED_10G:
698 link_speed = I40E_LINK_SPEED_10GB;
700 case ETH_LINK_SPEED_1000:
701 link_speed = I40E_LINK_SPEED_1GB;
703 case ETH_LINK_SPEED_100:
704 link_speed = I40E_LINK_SPEED_100MB;
712 i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
714 enum i40e_status_code status;
715 struct i40e_aq_get_phy_abilities_resp phy_ab;
716 struct i40e_aq_set_phy_config phy_conf;
717 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
718 I40E_AQ_PHY_FLAG_PAUSE_RX |
719 I40E_AQ_PHY_FLAG_LOW_POWER;
720 const uint8_t advt = I40E_LINK_SPEED_40GB |
721 I40E_LINK_SPEED_10GB |
722 I40E_LINK_SPEED_1GB |
723 I40E_LINK_SPEED_100MB;
726 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
731 memset(&phy_conf, 0, sizeof(phy_conf));
733 /* bits 0-2 use the values from get_phy_abilities_resp */
735 abilities |= phy_ab.abilities & mask;
737 /* update abilities and speed */
738 if (abilities & I40E_AQ_PHY_AN_ENABLED)
739 phy_conf.link_speed = advt;
741 phy_conf.link_speed = force_speed;
743 phy_conf.abilities = abilities;
745 /* use get_phy_abilities_resp value for the rest */
746 phy_conf.phy_type = phy_ab.phy_type;
747 phy_conf.eee_capability = phy_ab.eee_capability;
748 phy_conf.eeer = phy_ab.eeer_val;
749 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
751 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
752 phy_ab.abilities, phy_ab.link_speed);
753 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
754 phy_conf.abilities, phy_conf.link_speed);
756 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
764 i40e_apply_link_speed(struct rte_eth_dev *dev)
767 uint8_t abilities = 0;
768 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
769 struct rte_eth_conf *conf = &dev->data->dev_conf;
771 speed = i40e_parse_link_speed(conf->link_speed);
772 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
773 if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
774 abilities |= I40E_AQ_PHY_AN_ENABLED;
776 abilities |= I40E_AQ_PHY_LINK_ENABLED;
778 return i40e_phy_conf_link(hw, abilities, speed);
782 i40e_dev_start(struct rte_eth_dev *dev)
784 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
785 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
786 struct i40e_vsi *vsi = pf->main_vsi;
789 if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
790 (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
791 PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
792 dev->data->dev_conf.link_duplex,
798 ret = i40e_vsi_init(vsi);
799 if (ret != I40E_SUCCESS) {
800 PMD_DRV_LOG(ERR, "Failed to init VSI");
804 /* Map queues with MSIX interrupt */
805 i40e_vsi_queues_bind_intr(vsi);
806 i40e_vsi_enable_queues_intr(vsi);
808 /* Enable all queues which have been configured */
809 ret = i40e_vsi_switch_queues(vsi, TRUE);
810 if (ret != I40E_SUCCESS) {
811 PMD_DRV_LOG(ERR, "Failed to enable VSI");
815 /* Enable receiving broadcast packets */
816 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
817 ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
818 if (ret != I40E_SUCCESS)
819 PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
822 /* Apply link configuration */
823 ret = i40e_apply_link_speed(dev);
824 if (I40E_SUCCESS != ret) {
825 PMD_DRV_LOG(ERR, "Failed to apply link settings");
832 i40e_vsi_switch_queues(vsi, FALSE);
838 i40e_dev_stop(struct rte_eth_dev *dev)
840 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
841 struct i40e_vsi *vsi = pf->main_vsi;
843 /* Disable all queues */
844 i40e_vsi_switch_queues(vsi, FALSE);
847 i40e_dev_set_link_down(dev);
849 /* unmap queues from interrupt registers */
850 i40e_vsi_disable_queues_intr(vsi);
851 i40e_vsi_queues_unbind_intr(vsi);
855 i40e_dev_close(struct rte_eth_dev *dev)
857 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
858 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
861 PMD_INIT_FUNC_TRACE();
865 /* Disable interrupt */
866 i40e_pf_disable_irq0(hw);
867 rte_intr_disable(&(dev->pci_dev->intr_handle));
869 /* shutdown and destroy the HMC */
870 i40e_shutdown_lan_hmc(hw);
872 /* release all the existing VSIs and VEBs */
873 i40e_vsi_release(pf->main_vsi);
875 /* shutdown the adminq */
876 i40e_aq_queue_shutdown(hw, true);
877 i40e_shutdown_adminq(hw);
879 i40e_res_pool_destroy(&pf->qp_pool);
880 i40e_res_pool_destroy(&pf->msix_pool);
882 /* force a PF reset to clean anything leftover */
883 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
884 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
885 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
886 I40E_WRITE_FLUSH(hw);
890 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
892 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
893 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
894 struct i40e_vsi *vsi = pf->main_vsi;
897 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
899 if (status != I40E_SUCCESS)
900 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
902 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
904 if (status != I40E_SUCCESS)
905 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
910 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
912 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
913 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
914 struct i40e_vsi *vsi = pf->main_vsi;
917 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
919 if (status != I40E_SUCCESS)
920 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
922 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
924 if (status != I40E_SUCCESS)
925 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
929 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
931 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
932 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
933 struct i40e_vsi *vsi = pf->main_vsi;
936 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
937 if (ret != I40E_SUCCESS)
938 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
942 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
944 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
945 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
946 struct i40e_vsi *vsi = pf->main_vsi;
949 if (dev->data->promiscuous == 1)
950 return; /* must remain in all_multicast mode */
952 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
953 vsi->seid, FALSE, NULL);
954 if (ret != I40E_SUCCESS)
955 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
959 * Set device link up.
962 i40e_dev_set_link_up(struct rte_eth_dev *dev)
964 /* re-apply link speed setting */
965 return i40e_apply_link_speed(dev);
969 * Set device link down.
972 i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
974 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
975 uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
976 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
978 return i40e_phy_conf_link(hw, abilities, speed);
982 i40e_dev_link_update(struct rte_eth_dev *dev,
983 __rte_unused int wait_to_complete)
985 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
986 struct i40e_link_status link_status;
987 struct rte_eth_link link, old;
990 memset(&link, 0, sizeof(link));
991 memset(&old, 0, sizeof(old));
992 memset(&link_status, 0, sizeof(link_status));
993 rte_i40e_dev_atomic_read_link_status(dev, &old);
995 /* Get link status information from hardware */
996 status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
997 if (status != I40E_SUCCESS) {
998 link.link_speed = ETH_LINK_SPEED_100;
999 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1000 PMD_DRV_LOG(ERR, "Failed to get link info");
1004 link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
1006 if (!link.link_status)
1009 /* i40e uses full duplex only */
1010 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1012 /* Parse the link status */
1013 switch (link_status.link_speed) {
1014 case I40E_LINK_SPEED_100MB:
1015 link.link_speed = ETH_LINK_SPEED_100;
1017 case I40E_LINK_SPEED_1GB:
1018 link.link_speed = ETH_LINK_SPEED_1000;
1020 case I40E_LINK_SPEED_10GB:
1021 link.link_speed = ETH_LINK_SPEED_10G;
1023 case I40E_LINK_SPEED_20GB:
1024 link.link_speed = ETH_LINK_SPEED_20G;
1026 case I40E_LINK_SPEED_40GB:
1027 link.link_speed = ETH_LINK_SPEED_40G;
1030 link.link_speed = ETH_LINK_SPEED_100;
1035 rte_i40e_dev_atomic_write_link_status(dev, &link);
1036 if (link.link_status == old.link_status)
1042 /* Get all the statistics of a VSI */
1044 i40e_update_vsi_stats(struct i40e_vsi *vsi)
1046 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
1047 struct i40e_eth_stats *nes = &vsi->eth_stats;
1048 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1049 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
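/*
 * The hardware counters are free-running; the helpers below accumulate the
 * delta against the values saved in vsi->eth_stats_offset. When
 * offset_loaded is false (e.g. right after i40e_dev_stats_reset()), the
 * current hardware value is taken as the new baseline instead.
 */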
1051 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
1052 vsi->offset_loaded, &oes->rx_bytes,
1054 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
1055 vsi->offset_loaded, &oes->rx_unicast,
1057 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
1058 vsi->offset_loaded, &oes->rx_multicast,
1059 &nes->rx_multicast);
1060 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
1061 vsi->offset_loaded, &oes->rx_broadcast,
1062 &nes->rx_broadcast);
1063 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
1064 &oes->rx_discards, &nes->rx_discards);
1065 /* GLV_REPC not supported */
1066 /* GLV_RMPC not supported */
1067 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
1068 &oes->rx_unknown_protocol,
1069 &nes->rx_unknown_protocol);
1070 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
1071 vsi->offset_loaded, &oes->tx_bytes,
1073 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
1074 vsi->offset_loaded, &oes->tx_unicast,
1076 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
1077 vsi->offset_loaded, &oes->tx_multicast,
1078 &nes->tx_multicast);
1079 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
1080 vsi->offset_loaded, &oes->tx_broadcast,
1081 &nes->tx_broadcast);
1082 /* GLV_TDPC not supported */
1083 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
1084 &oes->tx_errors, &nes->tx_errors);
1085 vsi->offset_loaded = true;
1087 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
1089 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", nes->rx_bytes);
1090 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", nes->rx_unicast);
1091 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", nes->rx_multicast);
1092 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", nes->rx_broadcast);
1093 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", nes->rx_discards);
1094 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1095 nes->rx_unknown_protocol);
1096 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", nes->tx_bytes);
1097 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", nes->tx_unicast);
1098 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", nes->tx_multicast);
1099 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", nes->tx_broadcast);
1100 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", nes->tx_discards);
1101 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", nes->tx_errors);
1102 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
1106 /* Get all statistics of a port */
1108 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1111 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1112 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1113 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
1114 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
1116 /* Get statistics of struct i40e_eth_stats */
1117 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
1118 I40E_GLPRT_GORCL(hw->port),
1119 pf->offset_loaded, &os->eth.rx_bytes,
1121 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
1122 I40E_GLPRT_UPRCL(hw->port),
1123 pf->offset_loaded, &os->eth.rx_unicast,
1124 &ns->eth.rx_unicast);
1125 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
1126 I40E_GLPRT_MPRCL(hw->port),
1127 pf->offset_loaded, &os->eth.rx_multicast,
1128 &ns->eth.rx_multicast);
1129 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
1130 I40E_GLPRT_BPRCL(hw->port),
1131 pf->offset_loaded, &os->eth.rx_broadcast,
1132 &ns->eth.rx_broadcast);
1133 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
1134 pf->offset_loaded, &os->eth.rx_discards,
1135 &ns->eth.rx_discards);
1136 /* GLPRT_REPC not supported */
1137 /* GLPRT_RMPC not supported */
1138 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
1140 &os->eth.rx_unknown_protocol,
1141 &ns->eth.rx_unknown_protocol);
1142 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
1143 I40E_GLPRT_GOTCL(hw->port),
1144 pf->offset_loaded, &os->eth.tx_bytes,
1146 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
1147 I40E_GLPRT_UPTCL(hw->port),
1148 pf->offset_loaded, &os->eth.tx_unicast,
1149 &ns->eth.tx_unicast);
1150 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
1151 I40E_GLPRT_MPTCL(hw->port),
1152 pf->offset_loaded, &os->eth.tx_multicast,
1153 &ns->eth.tx_multicast);
1154 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
1155 I40E_GLPRT_BPTCL(hw->port),
1156 pf->offset_loaded, &os->eth.tx_broadcast,
1157 &ns->eth.tx_broadcast);
1158 i40e_stat_update_32(hw, I40E_GLPRT_TDPC(hw->port),
1159 pf->offset_loaded, &os->eth.tx_discards,
1160 &ns->eth.tx_discards);
1161 /* GLPRT_TEPC not supported */
1163 /* additional port specific stats */
1164 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
1165 pf->offset_loaded, &os->tx_dropped_link_down,
1166 &ns->tx_dropped_link_down);
1167 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
1168 pf->offset_loaded, &os->crc_errors,
1170 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
1171 pf->offset_loaded, &os->illegal_bytes,
1172 &ns->illegal_bytes);
1173 /* GLPRT_ERRBC not supported */
1174 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
1175 pf->offset_loaded, &os->mac_local_faults,
1176 &ns->mac_local_faults);
1177 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
1178 pf->offset_loaded, &os->mac_remote_faults,
1179 &ns->mac_remote_faults);
1180 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
1181 pf->offset_loaded, &os->rx_length_errors,
1182 &ns->rx_length_errors);
1183 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
1184 pf->offset_loaded, &os->link_xon_rx,
1186 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
1187 pf->offset_loaded, &os->link_xoff_rx,
1189 for (i = 0; i < 8; i++) {
1190 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1192 &os->priority_xon_rx[i],
1193 &ns->priority_xon_rx[i]);
1194 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1196 &os->priority_xoff_rx[i],
1197 &ns->priority_xoff_rx[i]);
1199 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
1200 pf->offset_loaded, &os->link_xon_tx,
1202 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1203 pf->offset_loaded, &os->link_xoff_tx,
1205 for (i = 0; i < 8; i++) {
1206 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1208 &os->priority_xon_tx[i],
1209 &ns->priority_xon_tx[i]);
1210 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1212 &os->priority_xoff_tx[i],
1213 &ns->priority_xoff_tx[i]);
1214 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1216 &os->priority_xon_2_xoff[i],
1217 &ns->priority_xon_2_xoff[i]);
1219 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
1220 I40E_GLPRT_PRC64L(hw->port),
1221 pf->offset_loaded, &os->rx_size_64,
1223 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
1224 I40E_GLPRT_PRC127L(hw->port),
1225 pf->offset_loaded, &os->rx_size_127,
1227 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
1228 I40E_GLPRT_PRC255L(hw->port),
1229 pf->offset_loaded, &os->rx_size_255,
1231 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
1232 I40E_GLPRT_PRC511L(hw->port),
1233 pf->offset_loaded, &os->rx_size_511,
1235 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
1236 I40E_GLPRT_PRC1023L(hw->port),
1237 pf->offset_loaded, &os->rx_size_1023,
1239 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
1240 I40E_GLPRT_PRC1522L(hw->port),
1241 pf->offset_loaded, &os->rx_size_1522,
1243 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
1244 I40E_GLPRT_PRC9522L(hw->port),
1245 pf->offset_loaded, &os->rx_size_big,
1247 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
1248 pf->offset_loaded, &os->rx_undersize,
1250 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
1251 pf->offset_loaded, &os->rx_fragments,
1253 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
1254 pf->offset_loaded, &os->rx_oversize,
1256 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
1257 pf->offset_loaded, &os->rx_jabber,
1259 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
1260 I40E_GLPRT_PTC64L(hw->port),
1261 pf->offset_loaded, &os->tx_size_64,
1263 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
1264 I40E_GLPRT_PTC127L(hw->port),
1265 pf->offset_loaded, &os->tx_size_127,
1267 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
1268 I40E_GLPRT_PTC255L(hw->port),
1269 pf->offset_loaded, &os->tx_size_255,
1271 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
1272 I40E_GLPRT_PTC511L(hw->port),
1273 pf->offset_loaded, &os->tx_size_511,
1275 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
1276 I40E_GLPRT_PTC1023L(hw->port),
1277 pf->offset_loaded, &os->tx_size_1023,
1279 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
1280 I40E_GLPRT_PTC1522L(hw->port),
1281 pf->offset_loaded, &os->tx_size_1522,
1283 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
1284 I40E_GLPRT_PTC9522L(hw->port),
1285 pf->offset_loaded, &os->tx_size_big,
1287 /* GLPRT_MSPDC not supported */
1288 /* GLPRT_XEC not supported */
1290 pf->offset_loaded = true;
1293 i40e_update_vsi_stats(pf->main_vsi);
1295 stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
1296 ns->eth.rx_broadcast;
1297 stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
1298 ns->eth.tx_broadcast;
1299 stats->ibytes = ns->eth.rx_bytes;
1300 stats->obytes = ns->eth.tx_bytes;
1301 stats->oerrors = ns->eth.tx_errors;
1302 stats->imcasts = ns->eth.rx_multicast;
1305 stats->ibadcrc = ns->crc_errors;
1306 stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
1307 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
1308 stats->imissed = ns->eth.rx_discards;
1309 stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;
1311 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
1312 PMD_DRV_LOG(DEBUG, "rx_bytes: %lu", ns->eth.rx_bytes);
1313 PMD_DRV_LOG(DEBUG, "rx_unicast: %lu", ns->eth.rx_unicast);
1314 PMD_DRV_LOG(DEBUG, "rx_multicast: %lu", ns->eth.rx_multicast);
1315 PMD_DRV_LOG(DEBUG, "rx_broadcast: %lu", ns->eth.rx_broadcast);
1316 PMD_DRV_LOG(DEBUG, "rx_discards: %lu", ns->eth.rx_discards);
1317 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %lu",
1318 ns->eth.rx_unknown_protocol);
1319 PMD_DRV_LOG(DEBUG, "tx_bytes: %lu", ns->eth.tx_bytes);
1320 PMD_DRV_LOG(DEBUG, "tx_unicast: %lu", ns->eth.tx_unicast);
1321 PMD_DRV_LOG(DEBUG, "tx_multicast: %lu", ns->eth.tx_multicast);
1322 PMD_DRV_LOG(DEBUG, "tx_broadcast: %lu", ns->eth.tx_broadcast);
1323 PMD_DRV_LOG(DEBUG, "tx_discards: %lu", ns->eth.tx_discards);
1324 PMD_DRV_LOG(DEBUG, "tx_errors: %lu", ns->eth.tx_errors);
1326 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %lu",
1327 ns->tx_dropped_link_down);
1328 PMD_DRV_LOG(DEBUG, "crc_errors: %lu", ns->crc_errors);
1329 PMD_DRV_LOG(DEBUG, "illegal_bytes: %lu",
1331 PMD_DRV_LOG(DEBUG, "error_bytes: %lu", ns->error_bytes);
1332 PMD_DRV_LOG(DEBUG, "mac_local_faults: %lu",
1333 ns->mac_local_faults);
1334 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %lu",
1335 ns->mac_remote_faults);
1336 PMD_DRV_LOG(DEBUG, "rx_length_errors: %lu",
1337 ns->rx_length_errors);
1338 PMD_DRV_LOG(DEBUG, "link_xon_rx: %lu", ns->link_xon_rx);
1339 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %lu", ns->link_xoff_rx);
1340 for (i = 0; i < 8; i++) {
1341 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %lu",
1342 i, ns->priority_xon_rx[i]);
1343 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %lu",
1344 i, ns->priority_xoff_rx[i]);
1346 PMD_DRV_LOG(DEBUG, "link_xon_tx: %lu", ns->link_xon_tx);
1347 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %lu", ns->link_xoff_tx);
1348 for (i = 0; i < 8; i++) {
1349 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %lu",
1350 i, ns->priority_xon_tx[i]);
1351 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %lu",
1352 i, ns->priority_xoff_tx[i]);
1353 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %lu",
1354 i, ns->priority_xon_2_xoff[i]);
1356 PMD_DRV_LOG(DEBUG, "rx_size_64: %lu", ns->rx_size_64);
1357 PMD_DRV_LOG(DEBUG, "rx_size_127: %lu", ns->rx_size_127);
1358 PMD_DRV_LOG(DEBUG, "rx_size_255: %lu", ns->rx_size_255);
1359 PMD_DRV_LOG(DEBUG, "rx_size_511: %lu", ns->rx_size_511);
1360 PMD_DRV_LOG(DEBUG, "rx_size_1023: %lu", ns->rx_size_1023);
1361 PMD_DRV_LOG(DEBUG, "rx_size_1522: %lu", ns->rx_size_1522);
1362 PMD_DRV_LOG(DEBUG, "rx_size_big: %lu", ns->rx_size_big);
1363 PMD_DRV_LOG(DEBUG, "rx_undersize: %lu", ns->rx_undersize);
1364 PMD_DRV_LOG(DEBUG, "rx_fragments: %lu", ns->rx_fragments);
1365 PMD_DRV_LOG(DEBUG, "rx_oversize: %lu", ns->rx_oversize);
1366 PMD_DRV_LOG(DEBUG, "rx_jabber: %lu", ns->rx_jabber);
1367 PMD_DRV_LOG(DEBUG, "tx_size_64: %lu", ns->tx_size_64);
1368 PMD_DRV_LOG(DEBUG, "tx_size_127: %lu", ns->tx_size_127);
1369 PMD_DRV_LOG(DEBUG, "tx_size_255: %lu", ns->tx_size_255);
1370 PMD_DRV_LOG(DEBUG, "tx_size_511: %lu", ns->tx_size_511);
1371 PMD_DRV_LOG(DEBUG, "tx_size_1023: %lu", ns->tx_size_1023);
1372 PMD_DRV_LOG(DEBUG, "tx_size_1522: %lu", ns->tx_size_1522);
1373 PMD_DRV_LOG(DEBUG, "tx_size_big: %lu", ns->tx_size_big);
1374 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %lu",
1375 ns->mac_short_packet_dropped);
1376 PMD_DRV_LOG(DEBUG, "checksum_error: %lu",
1377 ns->checksum_error);
1378 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
1381 /* Reset the statistics */
1383 i40e_dev_stats_reset(struct rte_eth_dev *dev)
1385 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1387 /* Clearing this flag reloads the start point (offset) of each counter on the next stats read */
1388 pf->offset_loaded = false;
1392 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
1393 __rte_unused uint16_t queue_id,
1394 __rte_unused uint8_t stat_idx,
1395 __rte_unused uint8_t is_rx)
1397 PMD_INIT_FUNC_TRACE();
1403 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1405 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1406 struct i40e_vsi *vsi = pf->main_vsi;
1408 dev_info->max_rx_queues = vsi->nb_qps;
1409 dev_info->max_tx_queues = vsi->nb_qps;
1410 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
1411 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
1412 dev_info->max_mac_addrs = vsi->max_macaddrs;
1413 dev_info->max_vfs = dev->pci_dev->max_vfs;
1414 dev_info->rx_offload_capa =
1415 DEV_RX_OFFLOAD_VLAN_STRIP |
1416 DEV_RX_OFFLOAD_IPV4_CKSUM |
1417 DEV_RX_OFFLOAD_UDP_CKSUM |
1418 DEV_RX_OFFLOAD_TCP_CKSUM;
1419 dev_info->tx_offload_capa =
1420 DEV_TX_OFFLOAD_VLAN_INSERT |
1421 DEV_TX_OFFLOAD_IPV4_CKSUM |
1422 DEV_TX_OFFLOAD_UDP_CKSUM |
1423 DEV_TX_OFFLOAD_TCP_CKSUM |
1424 DEV_TX_OFFLOAD_SCTP_CKSUM;
1426 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1428 .pthresh = I40E_DEFAULT_RX_PTHRESH,
1429 .hthresh = I40E_DEFAULT_RX_HTHRESH,
1430 .wthresh = I40E_DEFAULT_RX_WTHRESH,
1432 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
1436 dev_info->default_txconf = (struct rte_eth_txconf) {
1438 .pthresh = I40E_DEFAULT_TX_PTHRESH,
1439 .hthresh = I40E_DEFAULT_TX_HTHRESH,
1440 .wthresh = I40E_DEFAULT_TX_WTHRESH,
1442 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
1443 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
1444 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
1447 if (pf->flags & I40E_FLAG_VMDQ) {
1448 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
1449 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
1450 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
1451 pf->max_nb_vmdq_vsi;
1452 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
1453 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
1454 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
1459 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1461 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1462 struct i40e_vsi *vsi = pf->main_vsi;
1463 PMD_INIT_FUNC_TRACE();
1466 return i40e_vsi_add_vlan(vsi, vlan_id);
1468 return i40e_vsi_delete_vlan(vsi, vlan_id);
1472 i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
1473 __rte_unused uint16_t tpid)
1475 PMD_INIT_FUNC_TRACE();
1479 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1481 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1482 struct i40e_vsi *vsi = pf->main_vsi;
1484 if (mask & ETH_VLAN_STRIP_MASK) {
1485 /* Enable or disable VLAN stripping */
1486 if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1487 i40e_vsi_config_vlan_stripping(vsi, TRUE);
1489 i40e_vsi_config_vlan_stripping(vsi, FALSE);
1492 if (mask & ETH_VLAN_EXTEND_MASK) {
1493 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1494 i40e_vsi_config_double_vlan(vsi, TRUE);
1496 i40e_vsi_config_double_vlan(vsi, FALSE);
1501 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
1502 __rte_unused uint16_t queue,
1503 __rte_unused int on)
1505 PMD_INIT_FUNC_TRACE();
1509 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1511 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1512 struct i40e_vsi *vsi = pf->main_vsi;
1513 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
1514 struct i40e_vsi_vlan_pvid_info info;
1516 memset(&info, 0, sizeof(info));
1519 info.config.pvid = pvid;
1521 info.config.reject.tagged =
1522 data->dev_conf.txmode.hw_vlan_reject_tagged;
1523 info.config.reject.untagged =
1524 data->dev_conf.txmode.hw_vlan_reject_untagged;
1527 return i40e_vsi_vlan_pvid_set(vsi, &info);
1531 i40e_dev_led_on(struct rte_eth_dev *dev)
1533 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1534 uint32_t mode = i40e_led_get(hw);
1537 i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */
1543 i40e_dev_led_off(struct rte_eth_dev *dev)
1545 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1546 uint32_t mode = i40e_led_get(hw);
1549 i40e_led_set(hw, 0, false);
1555 i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1556 __rte_unused struct rte_eth_fc_conf *fc_conf)
1558 PMD_INIT_FUNC_TRACE();
1564 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
1565 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
1567 PMD_INIT_FUNC_TRACE();
1572 /* Add a MAC address, and update filters */
1574 i40e_macaddr_add(struct rte_eth_dev *dev,
1575 struct ether_addr *mac_addr,
1576 __rte_unused uint32_t index,
1579 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1580 struct i40e_mac_filter_info mac_filter;
1581 struct i40e_vsi *vsi;
1584 /* If VMDQ not enabled or configured, return */
1585 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
1586 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set MAC to pool %u",
1587 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
1592 if (pool > pf->nb_cfg_vmdq_vsi) {
1593 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
1594 pool, pf->nb_cfg_vmdq_vsi);
1598 (void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
1599 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
1604 vsi = pf->vmdq[pool - 1].vsi;
1606 ret = i40e_vsi_add_mac(vsi, &mac_filter);
1607 if (ret != I40E_SUCCESS) {
1608 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
1613 /* Remove a MAC address, and update filters */
1615 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1617 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1618 struct i40e_vsi *vsi;
1619 struct rte_eth_dev_data *data = dev->data;
1620 struct ether_addr *macaddr;
1625 macaddr = &(data->mac_addrs[index]);
1627 pool_sel = dev->data->mac_pool_sel[index];
1629 for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
1630 if (pool_sel & (1ULL << i)) {
1634 /* No VMDQ pool enabled or configured */
1635 if (!(pf->flags & I40E_FLAG_VMDQ) ||
1636 (i > pf->nb_cfg_vmdq_vsi)) {
1637 PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
1641 vsi = pf->vmdq[i - 1].vsi;
1643 ret = i40e_vsi_delete_mac(vsi, macaddr);
1646 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
1653 /* Set perfect match or hash match of MAC and VLAN for a VF */
1655 i40e_vf_mac_filter_set(struct i40e_pf *pf,
1656 struct rte_eth_mac_filter *filter,
1660 struct i40e_mac_filter_info mac_filter;
1661 struct ether_addr old_mac;
1662 struct ether_addr *new_mac;
1663 struct i40e_pf_vf *vf = NULL;
1668 PMD_DRV_LOG(ERR, "Invalid PF argument.");
1671 hw = I40E_PF_TO_HW(pf);
1673 if (filter == NULL) {
1674 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
1678 new_mac = &filter->mac_addr;
1680 if (is_zero_ether_addr(new_mac)) {
1681 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
1685 vf_id = filter->dst_id;
1687 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
1688 PMD_DRV_LOG(ERR, "Invalid argument.");
1691 vf = &pf->vfs[vf_id];
1693 if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
1694 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
1699 (void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
1700 (void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
1702 (void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
1705 mac_filter.filter_type = filter->filter_type;
1706 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
1707 if (ret != I40E_SUCCESS) {
1708 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
1711 ether_addr_copy(new_mac, &pf->dev_addr);
1713 (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
1715 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
1716 if (ret != I40E_SUCCESS) {
1717 PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
1721 /* Clear device address as it has been removed */
1722 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
1723 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
1729 /* MAC filter handle */
1731 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1734 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1735 struct rte_eth_mac_filter *filter;
1736 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1737 int ret = I40E_NOT_SUPPORTED;
1739 filter = (struct rte_eth_mac_filter *)(arg);
1741 switch (filter_op) {
1742 case RTE_ETH_FILTER_NONE:
1745 case RTE_ETH_FILTER_ADD:
1746 i40e_pf_disable_irq0(hw);
1748 ret = i40e_vf_mac_filter_set(pf, filter, 1);
1749 i40e_pf_enable_irq0(hw);
1751 case RTE_ETH_FILTER_DELETE:
1752 i40e_pf_disable_irq0(hw);
1754 ret = i40e_vf_mac_filter_set(pf, filter, 0);
1755 i40e_pf_enable_irq0(hw);
1758 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1759 ret = I40E_ERR_PARAM;
1767 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1768 struct rte_eth_rss_reta *reta_conf)
1770 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1772 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1774 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1776 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1778 mask = (uint8_t)((reta_conf->mask_hi >>
1787 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
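/*
 * Each I40E_PFQF_HLUT register packs four 8-bit redirection table entries.
 * Bytes whose mask bit is set are overwritten from reta_conf->reta[], while
 * the remaining bytes are kept from the value just read back.
 */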
1789 for (j = 0, lut = 0; j < 4; j++) {
1790 if (mask & (0x1 << j))
1791 lut |= reta_conf->reta[i + j] << (8 * j);
1793 lut |= l & (0xFF << (8 * j));
1795 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1802 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1803 struct rte_eth_rss_reta *reta_conf)
1805 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1807 uint8_t i, j, mask, max = ETH_RSS_RETA_NUM_ENTRIES / 2;
1809 for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
1811 mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
1813 mask = (uint8_t)((reta_conf->mask_hi >>
1819 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1820 for (j = 0; j < 4; j++) {
1821 if (mask & (0x1 << j))
1822 reta_conf->reta[i + j] =
1823 (uint8_t)((lut >> (8 * j)) & 0xFF);
1831 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
1832 * @hw: pointer to the HW structure
1833 * @mem: pointer to mem struct to fill out
1834 * @size: size of memory requested
1835 * @alignment: what to align the allocation to
1837 enum i40e_status_code
1838 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1839 struct i40e_dma_mem *mem,
1843 static uint64_t id = 0;
1844 const struct rte_memzone *mz = NULL;
1845 char z_name[RTE_MEMZONE_NAMESIZE];
1848 return I40E_ERR_PARAM;
1851 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
1852 #ifdef RTE_LIBRTE_XEN_DOM0
1853 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
1856 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
1859 return I40E_ERR_NO_MEMORY;
1864 #ifdef RTE_LIBRTE_XEN_DOM0
1865 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
1867 mem->pa = mz->phys_addr;
1870 return I40E_SUCCESS;
1874 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
1875 * @hw: pointer to the HW structure
1876 * @mem: ptr to mem struct to free
1878 enum i40e_status_code
1879 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1880 struct i40e_dma_mem *mem)
1882 if (!mem || !mem->va)
1883 return I40E_ERR_PARAM;
1888 return I40E_SUCCESS;
1892 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
1893 * @hw: pointer to the HW structure
1894 * @mem: pointer to mem struct to fill out
1895 * @size: size of memory requested
1897 enum i40e_status_code
1898 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1899 struct i40e_virt_mem *mem,
1903 return I40E_ERR_PARAM;
1906 mem->va = rte_zmalloc("i40e", size, 0);
1909 return I40E_SUCCESS;
1911 return I40E_ERR_NO_MEMORY;
1915 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
1916 * @hw: pointer to the HW structure
1917 * @mem: pointer to mem struct to free
1919 enum i40e_status_code
1920 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
1921 struct i40e_virt_mem *mem)
1924 return I40E_ERR_PARAM;
1929 return I40E_SUCCESS;
1933 i40e_init_spinlock_d(struct i40e_spinlock *sp)
1935 rte_spinlock_init(&sp->spinlock);
1939 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
1941 rte_spinlock_lock(&sp->spinlock);
1945 i40e_release_spinlock_d(struct i40e_spinlock *sp)
1947 rte_spinlock_unlock(&sp->spinlock);
1951 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
1957 * Get the hardware capabilities, which will be parsed
1958 * and saved into struct i40e_hw.
1961 i40e_get_cap(struct i40e_hw *hw)
1963 struct i40e_aqc_list_capabilities_element_resp *buf;
1964 uint16_t len, size = 0;
1967 /* Compute a buffer size large enough to hold the response data temporarily */
1968 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
1969 I40E_MAX_CAP_ELE_NUM;
1970 buf = rte_zmalloc("i40e", len, 0);
1972 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1973 return I40E_ERR_NO_MEMORY;
1976 /* Get and parse the capabilities, then save them into hw */
1977 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
1978 i40e_aqc_opc_list_func_capabilities, NULL);
1979 if (ret != I40E_SUCCESS)
1980 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
1982 /* Free the temporary buffer after use */
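/*
 * Work out how PF resources are partitioned between LAN, SR-IOV VF,
 * VMDQ and flow director queues/VSIs, based on the HW capabilities
 * discovered above.
 */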
1989 i40e_pf_parameter_init(struct rte_eth_dev *dev)
1991 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1992 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1993 uint16_t sum_queues = 0, sum_vsis, left_queues;
1995 /* First check whether the FW supports SR-IOV */
1996 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
1997 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
2001 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2002 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2003 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2004 /* Allocate queues for pf */
2005 if (hw->func_caps.rss) {
2006 pf->flags |= I40E_FLAG_RSS;
2007 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2008 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2009 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
2012 sum_queues = pf->lan_nb_qps;
2013 /* Default VSI is not counted in */
2015 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2017 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2018 pf->flags |= I40E_FLAG_SRIOV;
2019 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2020 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
2021 PMD_INIT_LOG(ERR, "Config VF number %u, "
2022 "max supported %u.",
2023 dev->pci_dev->max_vfs,
2024 hw->func_caps.num_vfs);
2027 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
2028 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
2029 "max support %u queues.",
2030 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2033 pf->vf_num = dev->pci_dev->max_vfs;
2034 sum_queues += pf->vf_nb_qps * pf->vf_num;
2035 sum_vsis += pf->vf_num;
2036 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2037 pf->vf_num, pf->vf_nb_qps);
2041 if (hw->func_caps.vmdq) {
2042 pf->flags |= I40E_FLAG_VMDQ;
2043 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2044 pf->max_nb_vmdq_vsi = 1;
2046 * If VMDQ is available, assume a single VSI can be created. Will adjust
2049 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2050 sum_vsis += pf->max_nb_vmdq_vsi;
2052 pf->vmdq_nb_qps = 0;
2053 pf->max_nb_vmdq_vsi = 0;
2055 pf->nb_cfg_vmdq_vsi = 0;
2057 if (hw->func_caps.fd) {
2058 pf->flags |= I40E_FLAG_FDIR;
2059 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2061 * Each flow director consumes one VSI and one queue,
2062 * but this cannot be calculated predictably here.
2066 if (sum_vsis > pf->max_num_vsi ||
2067 sum_queues > hw->func_caps.num_rx_qp) {
2068 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2069 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2070 pf->max_num_vsi, sum_vsis);
2071 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2072 hw->func_caps.num_rx_qp, sum_queues);
2076 /* Adjust VMDQ setting to support as many VMs as possible */
2077 if (pf->flags & I40E_FLAG_VMDQ) {
2078 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2080 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2081 pf->max_num_vsi - sum_vsis);
2083 /* Limit the max VMDQ number to what rte_ether can support */
2084 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2087 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2088 pf->max_nb_vmdq_vsi);
2089 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2092 /* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for misc interrupts
2094 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2095 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2096 sum_vsis, hw->func_caps.num_msix_vectors);
2099 return I40E_SUCCESS;
2103 i40e_pf_get_switch_config(struct i40e_pf *pf)
2105 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2106 struct i40e_aqc_get_switch_config_resp *switch_config;
2107 struct i40e_aqc_switch_config_element_resp *element;
2108 uint16_t start_seid = 0, num_reported;
2111 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2112 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2113 if (!switch_config) {
2114 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2118 /* Get the switch configurations */
2119 ret = i40e_aq_get_switch_config(hw, switch_config,
2120 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2121 if (ret != I40E_SUCCESS) {
2122 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2125 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2126 if (num_reported != 1) { /* The number should be 1 */
2127 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2131 /* Parse the switch configuration elements */
2132 element = &(switch_config->element[0]);
2133 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2134 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2135 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2137 PMD_DRV_LOG(INFO, "Unknown element type");
2140 rte_free(switch_config);
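/*
 * Simple resource pool helpers: a pool tracks contiguous ranges of
 * queues (or MSI-X vectors) in a free list and an alloc list.
 */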
2146 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2149 struct pool_entry *entry;
2151 if (pool == NULL || num == 0)
2154 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2155 if (entry == NULL) {
2156 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2160 /* Initialize the queue heap */
2161 pool->num_free = num;
2162 pool->num_alloc = 0;
2164 LIST_INIT(&pool->alloc_list);
2165 LIST_INIT(&pool->free_list);
2167 /* Initialize element */
2171 LIST_INSERT_HEAD(&pool->free_list, entry, next);
2176 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2178 struct pool_entry *entry;
2183 LIST_FOREACH(entry, &pool->alloc_list, next) {
2184 LIST_REMOVE(entry, next);
2188 LIST_FOREACH(entry, &pool->free_list, next) {
2189 LIST_REMOVE(entry, next);
2194 pool->num_alloc = 0;
2196 LIST_INIT(&pool->alloc_list);
2197 LIST_INIT(&pool->free_list);
2201 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2204 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2205 uint32_t pool_offset;
2209 PMD_DRV_LOG(ERR, "Invalid parameter");
2213 pool_offset = base - pool->base;
2214 /* Lookup in alloc list */
2215 LIST_FOREACH(entry, &pool->alloc_list, next) {
2216 if (entry->base == pool_offset) {
2217 valid_entry = entry;
2218 LIST_REMOVE(entry, next);
2223 /* Not found, return */
2224 if (valid_entry == NULL) {
2225 PMD_DRV_LOG(ERR, "Failed to find entry");
2230 * Found it; move it to the free list and try to merge.
2231 * To make merging easier, the free list is always kept sorted by queue base.
2232 * Find the adjacent prev and next entries.
2235 LIST_FOREACH(entry, &pool->free_list, next) {
2236 if (entry->base > valid_entry->base) {
2244 /* Try to merge with the next one */
2246 /* Merge with next one */
2247 if (valid_entry->base + valid_entry->len == next->base) {
2248 next->base = valid_entry->base;
2249 next->len += valid_entry->len;
2250 rte_free(valid_entry);
2257 /* Merge with previous one */
2258 if (prev->base + prev->len == valid_entry->base) {
2259 prev->len += valid_entry->len;
2260 /* If it merged with the next one, remove the next node */
2262 LIST_REMOVE(valid_entry, next);
2263 rte_free(valid_entry);
2265 rte_free(valid_entry);
2271 /* No adjacent entry to merge with; insert */
2274 LIST_INSERT_AFTER(prev, valid_entry, next);
2275 else if (next != NULL)
2276 LIST_INSERT_BEFORE(next, valid_entry, next);
2277 else /* It's empty list, insert to head */
2278 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2281 pool->num_free += valid_entry->len;
2282 pool->num_alloc -= valid_entry->len;
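/*
 * Allocate 'num' contiguous entries from the pool using a best-fit search
 * over the free list; returns the absolute base index on success.
 */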
2288 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2291 struct pool_entry *entry, *valid_entry;
2293 if (pool == NULL || num == 0) {
2294 PMD_DRV_LOG(ERR, "Invalid parameter");
2298 if (pool->num_free < num) {
2299 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2300 num, pool->num_free);
2305 /* Look up the free list and find the best-fit entry */
2306 LIST_FOREACH(entry, &pool->free_list, next) {
2307 if (entry->len >= num) {
2309 if (entry->len == num) {
2310 valid_entry = entry;
2313 if (valid_entry == NULL || valid_entry->len > entry->len)
2314 valid_entry = entry;
2318 /* No entry found to satisfy the request, return */
2319 if (valid_entry == NULL) {
2320 PMD_DRV_LOG(ERR, "No valid entry found");
2324 * The entry has exactly the requested number of queues;
2325 * remove it from the free_list.
2327 if (valid_entry->len == num) {
2328 LIST_REMOVE(valid_entry, next);
2331 * The entry has more queues than requested;
2332 * create a new entry for the alloc_list and subtract the
2333 * allocated queue base and number from the free_list entry.
2335 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2336 if (entry == NULL) {
2337 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2341 entry->base = valid_entry->base;
2343 valid_entry->base += num;
2344 valid_entry->len -= num;
2345 valid_entry = entry;
2348 /* Insert it into alloc list, not sorted */
2349 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2351 pool->num_free -= valid_entry->len;
2352 pool->num_alloc += valid_entry->len;
2354 return (valid_entry->base + pool->base);
2358 * bitmap_is_subset - Check whether src2 is subset of src1
2361 bitmap_is_subset(uint8_t src1, uint8_t src2)
2363 return !((src1 ^ src2) & src2);
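/*
 * Example for bitmap_is_subset: src1 = 0x7 and src2 = 0x5 gives
 * (src1 ^ src2) & src2 = 0, so src2 is a subset; src2 = 0x9 gives a
 * non-zero result, so it is not.
 */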
2367 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2369 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2371 /* If DCB is not supported, only default TC is supported */
2372 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2373 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2377 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2378 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2379 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2383 return I40E_SUCCESS;
2387 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2388 struct i40e_vsi_vlan_pvid_info *info)
2391 struct i40e_vsi_context ctxt;
2392 uint8_t vlan_flags = 0;
2395 if (vsi == NULL || info == NULL) {
2396 PMD_DRV_LOG(ERR, "invalid parameters");
2397 return I40E_ERR_PARAM;
2401 vsi->info.pvid = info->config.pvid;
2403 * If insert pvid is enabled, only tagged pkts are
2404 * allowed to be sent out.
2406 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2407 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2410 if (info->config.reject.tagged == 0)
2411 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2413 if (info->config.reject.untagged == 0)
2414 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2416 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2417 I40E_AQ_VSI_PVLAN_MODE_MASK);
2418 vsi->info.port_vlan_flags |= vlan_flags;
2419 vsi->info.valid_sections =
2420 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2421 memset(&ctxt, 0, sizeof(ctxt));
2422 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2423 ctxt.seid = vsi->seid;
2425 hw = I40E_VSI_TO_HW(vsi);
2426 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2427 if (ret != I40E_SUCCESS)
2428 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2434 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2436 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2438 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2440 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2441 if (ret != I40E_SUCCESS)
2445 PMD_DRV_LOG(ERR, "seid not valid");
2449 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2450 tc_bw_data.tc_valid_bits = enabled_tcmap;
2451 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2452 tc_bw_data.tc_bw_credits[i] =
2453 (enabled_tcmap & (1 << i)) ? 1 : 0;
2455 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2456 if (ret != I40E_SUCCESS) {
2457 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2461 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2462 sizeof(vsi->info.qs_handle));
2463 return I40E_SUCCESS;
2467 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2468 struct i40e_aqc_vsi_properties_data *info,
2469 uint8_t enabled_tcmap)
2471 int ret, total_tc = 0, i;
2472 uint16_t qpnum_per_tc, bsf, qp_idx;
2474 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2475 if (ret != I40E_SUCCESS)
2478 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2479 if (enabled_tcmap & (1 << i))
2481 vsi->enabled_tc = enabled_tcmap;
2483 /* Number of queues per enabled TC */
2484 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2485 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2486 bsf = rte_bsf32(qpnum_per_tc);
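/*
 * For example (assuming neither limit above is hit), 16 queue pairs with
 * only TC0 enabled gives qpnum_per_tc = 16 and bsf = 4, i.e. 2^4 queues
 * mapped for that TC.
 */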
2488 /* Adjust the queue number to actual queues that can be applied */
2489 vsi->nb_qps = qpnum_per_tc * total_tc;
2492 * Configure the TC and queue mapping parameters. For an enabled TC,
2493 * allocate qpnum_per_tc queues to it. A disabled TC is served by
2494 * the default queue.
2497 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2498 if (vsi->enabled_tc & (1 << i)) {
2499 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2500 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2501 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2502 qp_idx += qpnum_per_tc;
2504 info->tc_mapping[i] = 0;
2507 /* Associate queue number with VSI */
2508 if (vsi->type == I40E_VSI_SRIOV) {
2509 info->mapping_flags |=
2510 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2511 for (i = 0; i < vsi->nb_qps; i++)
2512 info->queue_mapping[i] =
2513 rte_cpu_to_le_16(vsi->base_queue + i);
2515 info->mapping_flags |=
2516 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2517 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2519 info->valid_sections =
2520 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2522 return I40E_SUCCESS;
2526 i40e_veb_release(struct i40e_veb *veb)
2528 struct i40e_vsi *vsi;
2531 if (veb == NULL || veb->associate_vsi == NULL)
2534 if (!TAILQ_EMPTY(&veb->head)) {
2535 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2539 vsi = veb->associate_vsi;
2540 hw = I40E_VSI_TO_HW(vsi);
2542 vsi->uplink_seid = veb->uplink_seid;
2543 i40e_aq_delete_element(hw, veb->seid, NULL);
2546 return I40E_SUCCESS;
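/*
 * Create a VEB on the given VSI's uplink and attach the VSI to it;
 * on success the VSI's uplink_seid is switched to the new VEB.
 */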
2550 static struct i40e_veb *
2551 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2553 struct i40e_veb *veb;
2557 if (NULL == pf || vsi == NULL) {
2558 PMD_DRV_LOG(ERR, "veb setup failed, "
2559 "associated VSI shouldn't be NULL");
2562 hw = I40E_PF_TO_HW(pf);
2564 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2566 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2570 veb->associate_vsi = vsi;
2571 TAILQ_INIT(&veb->head);
2572 veb->uplink_seid = vsi->uplink_seid;
2574 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2575 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2577 if (ret != I40E_SUCCESS) {
2578 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2579 hw->aq.asq_last_status);
2583 /* get statistics index */
2584 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2585 &veb->stats_idx, NULL, NULL, NULL);
2586 if (ret != I40E_SUCCESS) {
2587 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
2588 hw->aq.asq_last_status);
2592 /* Get VEB bandwidth, to be implemented */
2593 /* The associated VSI is now bound to the VEB; set its uplink to this VEB */
2594 vsi->uplink_seid = veb->seid;
2603 i40e_vsi_release(struct i40e_vsi *vsi)
2607 struct i40e_vsi_list *vsi_list;
2609 struct i40e_mac_filter *f;
2612 return I40E_SUCCESS;
2614 pf = I40E_VSI_TO_PF(vsi);
2615 hw = I40E_VSI_TO_HW(vsi);
2617 /* If the VSI has children attached, release the children first */
2619 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2620 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2622 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2624 i40e_veb_release(vsi->veb);
2627 /* Remove all macvlan filters of the VSI */
2628 i40e_vsi_remove_all_macvlan_filter(vsi);
2629 TAILQ_FOREACH(f, &vsi->mac_list, next)
2632 if (vsi->type != I40E_VSI_MAIN) {
2633 /* Remove vsi from parent's sibling list */
2634 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2635 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2636 return I40E_ERR_PARAM;
2638 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2639 &vsi->sib_vsi_list, list);
2641 /* Remove the switch element of the VSI */
2642 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2643 if (ret != I40E_SUCCESS)
2644 PMD_DRV_LOG(ERR, "Failed to delete element");
2646 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2648 if (vsi->type != I40E_VSI_SRIOV)
2649 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2652 return I40E_SUCCESS;
2656 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2658 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2659 struct i40e_aqc_remove_macvlan_element_data def_filter;
2660 struct i40e_mac_filter_info filter;
2663 if (vsi->type != I40E_VSI_MAIN)
2664 return I40E_ERR_CONFIG;
2665 memset(&def_filter, 0, sizeof(def_filter));
2666 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2668 def_filter.vlan_tag = 0;
2669 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2670 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2671 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2672 if (ret != I40E_SUCCESS) {
2673 struct i40e_mac_filter *f;
2674 struct ether_addr *mac;
2676 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2678 /* The permanent MAC needs to be added to the MAC list */
2679 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2681 PMD_DRV_LOG(ERR, "failed to allocate memory");
2682 return I40E_ERR_NO_MEMORY;
2684 mac = &f->mac_info.mac_addr;
2685 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2687 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2688 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2693 (void)rte_memcpy(&filter.mac_addr,
2694 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2695 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2696 return i40e_vsi_add_mac(vsi, &filter);
2700 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2702 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2703 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2704 struct i40e_hw *hw = &vsi->adapter->hw;
2708 memset(&bw_config, 0, sizeof(bw_config));
2709 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2710 if (ret != I40E_SUCCESS) {
2711 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2712 hw->aq.asq_last_status);
2716 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2717 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2718 &ets_sla_config, NULL);
2719 if (ret != I40E_SUCCESS) {
2720 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2721 "configuration %u", hw->aq.asq_last_status);
2725 /* Don't store the info yet, just print it out */
2726 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2727 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2728 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2729 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2730 ets_sla_config.share_credits[i]);
2731 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2732 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2733 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2734 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2743 i40e_vsi_setup(struct i40e_pf *pf,
2744 enum i40e_vsi_type type,
2745 struct i40e_vsi *uplink_vsi,
2746 uint16_t user_param)
2748 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2749 struct i40e_vsi *vsi;
2750 struct i40e_mac_filter_info filter;
2752 struct i40e_vsi_context ctxt;
2753 struct ether_addr broadcast =
2754 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2756 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2757 PMD_DRV_LOG(ERR, "VSI setup failed, "
2758 "VSI link shouldn't be NULL");
2762 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2763 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2764 "uplink VSI should be NULL");
2768 /* If the uplink VSI hasn't set up a VEB, create one first */
2769 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2770 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2772 if (NULL == uplink_vsi->veb) {
2773 PMD_DRV_LOG(ERR, "VEB setup failed");
2778 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2780 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2783 TAILQ_INIT(&vsi->mac_list);
2785 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2786 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2787 vsi->parent_vsi = uplink_vsi;
2788 vsi->user_param = user_param;
2789 /* Allocate queues */
2790 switch (vsi->type) {
2791 case I40E_VSI_MAIN :
2792 vsi->nb_qps = pf->lan_nb_qps;
2794 case I40E_VSI_SRIOV :
2795 vsi->nb_qps = pf->vf_nb_qps;
2797 case I40E_VSI_VMDQ2:
2798 vsi->nb_qps = pf->vmdq_nb_qps;
2803 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2805 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
2809 vsi->base_queue = ret;
2811 /* VF has MSIX interrupt in VF range, don't allocate here */
2812 if (type != I40E_VSI_SRIOV) {
2813 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
2815 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
2816 goto fail_queue_alloc;
2818 vsi->msix_intr = ret;
2822 if (type == I40E_VSI_MAIN) {
2823 /* For main VSI, no need to add since it's default one */
2824 vsi->uplink_seid = pf->mac_seid;
2825 vsi->seid = pf->main_vsi_seid;
2826 /* Bind queues with specific MSIX interrupt */
2828 * At least 2 interrupts are needed: one for misc causes, which will
2829 * be enabled from the OS side, and another for binding the queues'
2830 * interrupt from the device side only.
2833 /* Get default VSI parameters from hardware */
2834 memset(&ctxt, 0, sizeof(ctxt));
2835 ctxt.seid = vsi->seid;
2836 ctxt.pf_num = hw->pf_id;
2837 ctxt.uplink_seid = vsi->uplink_seid;
2839 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
2840 if (ret != I40E_SUCCESS) {
2841 PMD_DRV_LOG(ERR, "Failed to get VSI params");
2842 goto fail_msix_alloc;
2844 (void)rte_memcpy(&vsi->info, &ctxt.info,
2845 sizeof(struct i40e_aqc_vsi_properties_data));
2846 vsi->vsi_id = ctxt.vsi_number;
2847 vsi->info.valid_sections = 0;
2849 /* Configure TC; only TC0 is enabled */
2850 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
2852 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
2853 goto fail_msix_alloc;
2856 /* TC, queue mapping */
2857 memset(&ctxt, 0, sizeof(ctxt));
2858 vsi->info.valid_sections |=
2859 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2860 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2861 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2862 (void)rte_memcpy(&ctxt.info, &vsi->info,
2863 sizeof(struct i40e_aqc_vsi_properties_data));
2864 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2865 I40E_DEFAULT_TCMAP);
2866 if (ret != I40E_SUCCESS) {
2867 PMD_DRV_LOG(ERR, "Failed to configure "
2868 "TC queue mapping");
2869 goto fail_msix_alloc;
2871 ctxt.seid = vsi->seid;
2872 ctxt.pf_num = hw->pf_id;
2873 ctxt.uplink_seid = vsi->uplink_seid;
2876 /* Update VSI parameters */
2877 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2878 if (ret != I40E_SUCCESS) {
2879 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2880 goto fail_msix_alloc;
2883 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2884 sizeof(vsi->info.tc_mapping));
2885 (void)rte_memcpy(&vsi->info.queue_mapping,
2886 &ctxt.info.queue_mapping,
2887 sizeof(vsi->info.queue_mapping));
2888 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2889 vsi->info.valid_sections = 0;
2891 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
2895 * Updating the default filter setting is necessary to prevent
2896 * reception of tagged packets.
2897 * Some old firmware configurations load a default macvlan
2898 * filter which accepts both tagged and untagged packets.
2899 * The update replaces it with a normal filter if needed.
2900 * For NVM 4.2.2 or later, the update is no longer needed.
2901 * Firmware with correct configurations loads the expected default
2902 * macvlan filter, which cannot be removed.
2904 i40e_update_default_filter_setting(vsi);
2905 } else if (type == I40E_VSI_SRIOV) {
2906 memset(&ctxt, 0, sizeof(ctxt));
2908 * For other VSIs, the uplink_seid equals the uplink VSI's
2909 * uplink_seid since they share the same VEB
2911 vsi->uplink_seid = uplink_vsi->uplink_seid;
2912 ctxt.pf_num = hw->pf_id;
2913 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
2914 ctxt.uplink_seid = vsi->uplink_seid;
2915 ctxt.connection_type = 0x1;
2916 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
2918 /* Configure switch ID */
2919 ctxt.info.valid_sections |=
2920 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2921 ctxt.info.switch_id =
2922 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2923 /* Configure port/vlan */
2924 ctxt.info.valid_sections |=
2925 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2926 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2927 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2928 I40E_DEFAULT_TCMAP);
2929 if (ret != I40E_SUCCESS) {
2930 PMD_DRV_LOG(ERR, "Failed to configure "
2931 "TC queue mapping");
2932 goto fail_msix_alloc;
2934 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2935 ctxt.info.valid_sections |=
2936 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2938 * Since the VSI is not created yet, only configure the parameters;
2939 * the VSI will be added below.
2941 } else if (type == I40E_VSI_VMDQ2) {
2942 memset(&ctxt, 0, sizeof(ctxt));
2944 * For other VSIs, the uplink_seid equals the uplink VSI's
2945 * uplink_seid since they share the same VEB
2947 vsi->uplink_seid = uplink_vsi->uplink_seid;
2948 ctxt.pf_num = hw->pf_id;
2950 ctxt.uplink_seid = vsi->uplink_seid;
2951 ctxt.connection_type = 0x1;
2952 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
2954 ctxt.info.valid_sections |=
2955 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
2956 /* user_param carries the flag to enable loopback */
2958 ctxt.info.switch_id =
2959 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
2960 ctxt.info.switch_id |=
2961 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
2964 /* Configure port/vlan */
2965 ctxt.info.valid_sections |=
2966 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2967 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
2968 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
2969 I40E_DEFAULT_TCMAP);
2970 if (ret != I40E_SUCCESS) {
2971 PMD_DRV_LOG(ERR, "Failed to configure "
2972 "TC queue mapping");
2973 goto fail_msix_alloc;
2975 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
2976 ctxt.info.valid_sections |=
2977 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
2979 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
2980 goto fail_msix_alloc;
2983 if (vsi->type != I40E_VSI_MAIN) {
2984 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
2986 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
2987 hw->aq.asq_last_status);
2988 goto fail_msix_alloc;
2990 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
2991 vsi->info.valid_sections = 0;
2992 vsi->seid = ctxt.seid;
2993 vsi->vsi_id = ctxt.vsi_number;
2994 vsi->sib_vsi_list.vsi = vsi;
2995 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
2996 &vsi->sib_vsi_list, list);
2999 /* MAC/VLAN configuration */
3000 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3001 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3003 ret = i40e_vsi_add_mac(vsi, &filter);
3004 if (ret != I40E_SUCCESS) {
3005 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3006 goto fail_msix_alloc;
3009 /* Get VSI BW information */
3010 i40e_vsi_dump_bw_config(vsi);
3013 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
3015 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
3021 /* Configure vlan stripping on or off */
3023 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3025 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3026 struct i40e_vsi_context ctxt;
3028 int ret = I40E_SUCCESS;
3030 /* Check whether it is already on or off */
3031 if (vsi->info.valid_sections &
3032 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3034 if ((vsi->info.port_vlan_flags &
3035 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3036 return 0; /* already on */
3038 if ((vsi->info.port_vlan_flags &
3039 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3040 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3041 return 0; /* already off */
3046 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3048 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3049 vsi->info.valid_sections =
3050 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3051 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3052 vsi->info.port_vlan_flags |= vlan_flags;
3053 ctxt.seid = vsi->seid;
3054 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3055 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3057 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3058 on ? "enable" : "disable");
3064 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3066 struct rte_eth_dev_data *data = dev->data;
3069 /* Apply vlan offload setting */
3070 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3072 /* Apply double-vlan setting, not implemented yet */
3074 /* Apply pvid setting */
3075 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3076 data->dev_conf.txmode.hw_vlan_insert_pvid);
3078 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3084 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3086 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3088 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
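/*
 * Derive the flow control mode from the link auto-negotiation result and
 * program the RX/TX pause enable registers accordingly.
 */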
3092 i40e_update_flow_control(struct i40e_hw *hw)
3094 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3095 struct i40e_link_status link_status;
3096 uint32_t rxfc = 0, txfc = 0, reg;
3100 memset(&link_status, 0, sizeof(link_status));
3101 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3102 if (ret != I40E_SUCCESS) {
3103 PMD_DRV_LOG(ERR, "Failed to get link status information");
3104 goto write_reg; /* Disable flow control */
3107 an_info = hw->phy.link_info.an_info;
3108 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3109 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3110 ret = I40E_ERR_NOT_READY;
3111 goto write_reg; /* Disable flow control */
3114 * If link auto negotiation is enabled, flow control needs to
3115 * be configured according to it
3117 switch (an_info & I40E_LINK_PAUSE_RXTX) {
3118 case I40E_LINK_PAUSE_RXTX:
3121 hw->fc.current_mode = I40E_FC_FULL;
3123 case I40E_AQ_LINK_PAUSE_RX:
3125 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3127 case I40E_AQ_LINK_PAUSE_TX:
3129 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3132 hw->fc.current_mode = I40E_FC_NONE;
3137 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3138 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3139 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3140 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3141 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3142 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
3149 i40e_pf_setup(struct i40e_pf *pf)
3151 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3152 struct i40e_filter_control_settings settings;
3153 struct i40e_vsi *vsi;
3156 /* Clear all stats counters */
3157 pf->offset_loaded = FALSE;
3158 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3159 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3161 ret = i40e_pf_get_switch_config(pf);
3162 if (ret != I40E_SUCCESS) {
3163 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3168 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3170 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3171 return I40E_ERR_NOT_READY;
3175 /* Configure filter control */
3176 memset(&settings, 0, sizeof(settings));
3177 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3178 /* Enable ethtype and macvlan filters */
3179 settings.enable_ethtype = TRUE;
3180 settings.enable_macvlan = TRUE;
3181 ret = i40e_set_filter_control(hw, &settings);
3183 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3186 /* Update flow control according to the auto negotiation */
3187 i40e_update_flow_control(hw);
3189 return I40E_SUCCESS;
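/*
 * Enable or disable a TX queue: set or clear QENA_REQ in QTX_ENA and
 * poll until QENA_STAT reflects the requested state, or time out.
 */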
3193 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3199 * Set or clear TX Queue Disable flags,
3200 * which is required by hardware.
3202 i40e_pre_tx_queue_cfg(hw, q_idx, on);
3203 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3205 /* Wait until the request is finished */
3206 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3207 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3208 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3209 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3210 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3216 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3217 return I40E_SUCCESS; /* already on, skip next steps */
3219 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3220 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3222 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3223 return I40E_SUCCESS; /* already off, skip next steps */
3224 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3226 /* Write the register */
3227 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3228 /* Check the result */
3229 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3230 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3231 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3233 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3234 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3237 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3238 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3242 /* Check whether it timed out */
3243 if (j >= I40E_CHK_Q_ENA_COUNT) {
3244 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3245 (on ? "enable" : "disable"), q_idx);
3246 return I40E_ERR_TIMEOUT;
3249 return I40E_SUCCESS;
3252 /* Switch the tx queues on or off */
3254 i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
3256 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3257 struct i40e_tx_queue *txq;
3258 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3262 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3263 txq = dev_data->tx_queues[i];
3264 /* Don't operate the queue if it is not configured, or, when
3265 * starting, if it is marked deferred (to be started per queue) */
3266 if (!txq->q_set || (on && txq->tx_deferred_start))
3269 ret = i40e_dev_tx_queue_start(dev, i);
3271 ret = i40e_dev_tx_queue_stop(dev, i);
3272 if (ret != I40E_SUCCESS)
3276 return I40E_SUCCESS;
3280 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3285 /* Wait until the request is finished */
3286 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3287 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3288 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3289 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3290 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3295 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3296 return I40E_SUCCESS; /* Already on, skip next steps */
3297 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3299 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3300 return I40E_SUCCESS; /* Already off, skip next steps */
3301 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3304 /* Write the register */
3305 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3306 /* Check the result */
3307 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3308 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3309 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3311 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3312 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3315 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3316 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3321 /* Check whether it timed out */
3322 if (j >= I40E_CHK_Q_ENA_COUNT) {
3323 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3324 (on ? "enable" : "disable"), q_idx);
3325 return I40E_ERR_TIMEOUT;
3328 return I40E_SUCCESS;
3330 /* Switch on or off the rx queues */
3332 i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
3334 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
3335 struct i40e_rx_queue *rxq;
3336 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
3340 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3341 rxq = dev_data->rx_queues[i];
3342 /* Don't operate the queue if it is not configured, or, when
3343 * starting, if it is marked deferred (to be started per queue) */
3344 if (!rxq->q_set || (on && rxq->rx_deferred_start))
3347 ret = i40e_dev_rx_queue_start(dev, i);
3349 ret = i40e_dev_rx_queue_stop(dev, i);
3350 if (ret != I40E_SUCCESS)
3354 return I40E_SUCCESS;
3357 /* Switch on or off all the rx/tx queues */
3359 i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
3364 /* enable rx queues before enabling tx queues */
3365 ret = i40e_vsi_switch_rx_queues(vsi, on);
3367 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3370 ret = i40e_vsi_switch_tx_queues(vsi, on);
3372 /* Stop tx queues before stopping rx queues */
3373 ret = i40e_vsi_switch_tx_queues(vsi, on);
3375 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3378 ret = i40e_vsi_switch_rx_queues(vsi, on);
3384 /* Initialize VSI for TX */
3386 i40e_vsi_tx_init(struct i40e_vsi *vsi)
3388 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3389 struct rte_eth_dev_data *data = pf->dev_data;
3391 uint32_t ret = I40E_SUCCESS;
3393 for (i = 0; i < data->nb_tx_queues; i++) {
3394 ret = i40e_tx_queue_init(data->tx_queues[i]);
3395 if (ret != I40E_SUCCESS)
3402 /* Initialize VSI for RX */
3404 i40e_vsi_rx_init(struct i40e_vsi *vsi)
3406 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
3407 struct rte_eth_dev_data *data = pf->dev_data;
3408 int ret = I40E_SUCCESS;
3411 i40e_pf_config_mq_rx(pf);
3412 for (i = 0; i < data->nb_rx_queues; i++) {
3413 ret = i40e_rx_queue_init(data->rx_queues[i]);
3414 if (ret != I40E_SUCCESS) {
3415 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3424 /* Initialize VSI */
3426 i40e_vsi_init(struct i40e_vsi *vsi)
3430 err = i40e_vsi_tx_init(vsi);
3432 PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
3435 err = i40e_vsi_rx_init(vsi);
3437 PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
3445 i40e_vmdq_setup(struct rte_eth_dev *dev)
3447 struct rte_eth_conf *conf = &dev->data->dev_conf;
3448 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3449 int i, err, conf_vsis, j, loop;
3450 struct i40e_vsi *vsi;
3451 struct i40e_vmdq_info *vmdq_info;
3452 struct rte_eth_vmdq_rx_conf *vmdq_conf;
3453 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3456 * Disable interrupts to avoid messages from VFs. This also avoids
3457 * race conditions during VSI creation/destruction.
3459 i40e_pf_disable_irq0(hw);
3461 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3462 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3466 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3467 if (conf_vsis > pf->max_nb_vmdq_vsi) {
3468 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3469 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3470 pf->max_nb_vmdq_vsi);
3474 if (pf->vmdq != NULL) {
3475 PMD_INIT_LOG(INFO, "VMDQ already configured");
3479 pf->vmdq = rte_zmalloc("vmdq_info_struct",
3480 sizeof(*vmdq_info) * conf_vsis, 0);
3482 if (pf->vmdq == NULL) {
3483 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3487 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3489 /* Create VMDQ VSI */
3490 for (i = 0; i < conf_vsis; i++) {
3491 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3492 vmdq_conf->enable_loop_back);
3494 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3498 vmdq_info = &pf->vmdq[i];
3500 vmdq_info->vsi = vsi;
3502 pf->nb_cfg_vmdq_vsi = conf_vsis;
3504 /* Configure VLANs */
3505 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3506 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3507 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3508 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3509 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3510 vmdq_conf->pool_map[i].vlan_id, j);
3512 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3513 vmdq_conf->pool_map[i].vlan_id);
3515 PMD_INIT_LOG(ERR, "Failed to add vlan");
3523 i40e_pf_enable_irq0(hw);
3528 for (i = 0; i < conf_vsis; i++)
3529 if (pf->vmdq[i].vsi == NULL)
3532 i40e_vsi_release(pf->vmdq[i].vsi);
3536 i40e_pf_enable_irq0(hw);
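/*
 * Read a 32-bit statistics register and accumulate the delta since the
 * stored offset, handling counter rollover.
 */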
3541 i40e_stat_update_32(struct i40e_hw *hw,
3549 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3553 if (new_data >= *offset)
3554 *stat = (uint64_t)(new_data - *offset);
3556 *stat = (uint64_t)((new_data +
3557 ((uint64_t)1 << I40E_32_BIT_SHIFT)) - *offset);
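/*
 * Rollover example for the 32-bit counter: offset = 0xFFFFFFF0 and
 * new_data = 0x00000010 gives a delta of 0x20 after adding 2^32.
 */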
3561 i40e_stat_update_48(struct i40e_hw *hw,
3570 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3571 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3572 I40E_16_BIT_MASK)) << I40E_32_BIT_SHIFT;
3577 if (new_data >= *offset)
3578 *stat = new_data - *offset;
3580 *stat = (uint64_t)((new_data +
3581 ((uint64_t)1 << I40E_48_BIT_SHIFT)) - *offset);
3583 *stat &= I40E_48_BIT_MASK;
3588 i40e_pf_disable_irq0(struct i40e_hw *hw)
3590 /* Disable all interrupt types */
3591 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3592 I40E_WRITE_FLUSH(hw);
3597 i40e_pf_enable_irq0(struct i40e_hw *hw)
3599 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3600 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3601 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3602 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3603 I40E_WRITE_FLUSH(hw);
3607 i40e_pf_config_irq0(struct i40e_hw *hw)
3609 /* Read pending requests and disable interrupts first */
3610 i40e_pf_disable_irq0(hw);
3611 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3612 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3613 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3615 /* Link no queues to IRQ0 */
3616 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3617 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3621 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3623 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3624 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3627 uint32_t index, offset, val;
3632 * Try to find which VF triggered a reset; use the absolute VF id for
3633 * access since the register is a global register.
3635 for (i = 0; i < pf->vf_num; i++) {
3636 abs_vf_id = hw->func_caps.vf_base_id + i;
3637 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3638 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3639 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3640 /* VFR event occurred */
3641 if (val & (0x1 << offset)) {
3644 /* Clear the event first */
3645 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3647 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
3649 * Only notify that a VF reset event occurred;
3650 * don't trigger another SW reset
3652 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3653 if (ret != I40E_SUCCESS)
3654 PMD_DRV_LOG(ERR, "Failed to do VF reset");
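/*
 * Drain the admin receive queue and dispatch each event; currently only
 * VF-to-PF messages are handled, other opcodes are reported as unsupported.
 */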
3660 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3662 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3663 struct i40e_arq_event_info info;
3664 uint16_t pending, opcode;
3667 info.buf_len = I40E_AQ_BUF_SZ;
3668 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3669 if (!info.msg_buf) {
3670 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3676 ret = i40e_clean_arq_element(hw, &info, &pending);
3678 if (ret != I40E_SUCCESS) {
3679 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3680 "aq_err: %u", hw->aq.asq_last_status);
3683 opcode = rte_le_to_cpu_16(info.desc.opcode);
3686 case i40e_aqc_opc_send_msg_to_pf:
3687 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
3688 i40e_pf_host_handle_vf_msg(dev,
3689 rte_le_to_cpu_16(info.desc.retval),
3690 rte_le_to_cpu_32(info.desc.cookie_high),
3691 rte_le_to_cpu_32(info.desc.cookie_low),
3696 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3701 rte_free(info.msg_buf);
3705 * This interrupt handler is registered as an alarm callback to handle the
3706 * LSC interrupt after a fixed delay, in order to wait for the NIC to reach
3707 * a stable state. Currently i40e waits 1 sec for the link-up interrupt;
3708 * no delay is needed for the link-down interrupt.
3711 i40e_dev_interrupt_delayed_handler(void *param)
3713 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3714 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3717 /* read interrupt causes again */
3718 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3720 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3721 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3722 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
3723 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3724 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
3725 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3726 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
3727 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3728 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated\n");
3729 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3730 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3732 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3733 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
3734 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3735 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
3736 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3738 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3739 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3740 i40e_dev_handle_vfr_event(dev);
3742 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3743 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3744 i40e_dev_handle_aq_msg(dev);
3747 /* handle the link up interrupt in an alarm callback */
3748 i40e_dev_link_update(dev, 0);
3749 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3751 i40e_pf_enable_irq0(hw);
3752 rte_intr_enable(&(dev->pci_dev->intr_handle));
3756 * Interrupt handler triggered by NIC for handling
3757 * specific interrupt.
3760 * Pointer to interrupt handle.
3762 * The address of parameter (struct rte_eth_dev *) registered before.
3768 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
3771 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3772 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3775 /* Disable interrupt */
3776 i40e_pf_disable_irq0(hw);
3778 /* read out interrupt causes */
3779 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3781 /* No interrupt event indicated */
3782 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
3783 PMD_DRV_LOG(INFO, "No interrupt event");
3786 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3787 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3788 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
3789 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3790 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
3791 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3792 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
3793 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3794 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
3795 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3796 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
3797 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3798 PMD_DRV_LOG(ERR, "ICR0: HMC error");
3799 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3800 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
3801 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3803 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3804 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
3805 i40e_dev_handle_vfr_event(dev);
3807 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3808 PMD_DRV_LOG(INFO, "ICR0: adminq event");
3809 i40e_dev_handle_aq_msg(dev);
3812 /* Link Status Change interrupt */
3813 if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
3814 #define I40E_US_PER_SECOND 1000000
3815 struct rte_eth_link link;
3817 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
3818 memset(&link, 0, sizeof(link));
3819 rte_i40e_dev_atomic_read_link_status(dev, &link);
3820 i40e_dev_link_update(dev, 0);
3823 * For a link-up interrupt, wait 1 second to let the hardware
3824 * reach a stable state; otherwise several consecutive interrupts
3825 * can be observed.
3826 * For a link-down interrupt, no wait is needed.
3828 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
3829 i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
3832 _rte_eth_dev_callback_process(dev,
3833 RTE_ETH_EVENT_INTR_LSC);
3837 /* Enable interrupt */
3838 i40e_pf_enable_irq0(hw);
3839 rte_intr_enable(&(dev->pci_dev->intr_handle));
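/*
 * Program a set of MAC/VLAN filters through the admin queue, splitting
 * the request into chunks that fit within the ASQ buffer size.
 */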
3843 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
3844 struct i40e_macvlan_filter *filter,
3847 int ele_num, ele_buff_size;
3848 int num, actual_num, i;
3850 int ret = I40E_SUCCESS;
3851 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3852 struct i40e_aqc_add_macvlan_element_data *req_list;
3854 if (filter == NULL || total == 0)
3855 return I40E_ERR_PARAM;
3856 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3857 ele_buff_size = hw->aq.asq_buf_size;
3859 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
3860 if (req_list == NULL) {
3861 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3862 return I40E_ERR_NO_MEMORY;
3867 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3868 memset(req_list, 0, ele_buff_size);
3870 for (i = 0; i < actual_num; i++) {
3871 (void)rte_memcpy(req_list[i].mac_addr,
3872 &filter[num + i].macaddr, ETH_ADDR_LEN);
3873 req_list[i].vlan_tag =
3874 rte_cpu_to_le_16(filter[num + i].vlan_id);
3876 switch (filter[num + i].filter_type) {
3877 case RTE_MAC_PERFECT_MATCH:
3878 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
3879 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3881 case RTE_MACVLAN_PERFECT_MATCH:
3882 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
3884 case RTE_MAC_HASH_MATCH:
3885 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
3886 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
3888 case RTE_MACVLAN_HASH_MATCH:
3889 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
3892 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
3893 ret = I40E_ERR_PARAM;
3897 req_list[i].queue_number = 0;
3899 req_list[i].flags = rte_cpu_to_le_16(flags);
3902 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
3904 if (ret != I40E_SUCCESS) {
3905 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
3909 } while (num < total);
3917 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
3918 struct i40e_macvlan_filter *filter,
3921 int ele_num, ele_buff_size;
3922 int num, actual_num, i;
3924 int ret = I40E_SUCCESS;
3925 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3926 struct i40e_aqc_remove_macvlan_element_data *req_list;
3928 if (filter == NULL || total == 0)
3929 return I40E_ERR_PARAM;
3931 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
3932 ele_buff_size = hw->aq.asq_buf_size;
3934 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
3935 if (req_list == NULL) {
3936 PMD_DRV_LOG(ERR, "Failed to allocate memory");
3937 return I40E_ERR_NO_MEMORY;
3942 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
3943 memset(req_list, 0, ele_buff_size);
3945 for (i = 0; i < actual_num; i++) {
3946 (void)rte_memcpy(req_list[i].mac_addr,
3947 &filter[num + i].macaddr, ETH_ADDR_LEN);
3948 req_list[i].vlan_tag =
3949 rte_cpu_to_le_16(filter[num + i].vlan_id);
3951 switch (filter[num + i].filter_type) {
3952 case RTE_MAC_PERFECT_MATCH:
3953 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
3954 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3956 case RTE_MACVLAN_PERFECT_MATCH:
3957 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
3959 case RTE_MAC_HASH_MATCH:
3960 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
3961 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
3963 case RTE_MACVLAN_HASH_MATCH:
3964 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
3967 PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
3968 ret = I40E_ERR_PARAM;
3971 req_list[i].flags = rte_cpu_to_le_16(flags);
3974 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
3976 if (ret != I40E_SUCCESS) {
3977 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
3981 } while (num < total);
3988 /* Find a specific MAC filter */
3989 static struct i40e_mac_filter *
3990 i40e_find_mac_filter(struct i40e_vsi *vsi,
3991 struct ether_addr *macaddr)
3993 struct i40e_mac_filter *f;
3995 TAILQ_FOREACH(f, &vsi->mac_list, next) {
3996 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
4004 i40e_find_vlan_filter(struct i40e_vsi *vsi,
4007 uint32_t vid_idx, vid_bit;
4009 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
4010 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
4012 if (vsi->vfta[vid_idx] & vid_bit)
4019 i40e_set_vlan_filter(struct i40e_vsi *vsi,
4020 uint16_t vlan_id, bool on)
4022 uint32_t vid_idx, vid_bit;
4024 #define UINT32_BIT_MASK 0x1F
4025 #define VALID_VLAN_BIT_MASK 0xFFF
4026 /* VFTA is an array of 32-bit elements, each holding 32 VLAN bits. Find the
4027 * element first, then find the bit within it
4029 vid_idx = (uint32_t) ((vlan_id & VALID_VLAN_BIT_MASK) >>
4031 vid_bit = (uint32_t) (1 << (vlan_id & UINT32_BIT_MASK));
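/*
 * Following the same layout as i40e_find_vlan_filter above, vlan_id 100
 * maps to element 3 (100 >> 5) and bit 4 (100 & 0x1F), for example.
 */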
4034 vsi->vfta[vid_idx] |= vid_bit;
4036 vsi->vfta[vid_idx] &= ~vid_bit;
4040 * Find all VLAN options for a specific MAC address;
4041 * fill mv_f with the actual VLANs found.
4044 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
4045 struct i40e_macvlan_filter *mv_f,
4046 int num, struct ether_addr *addr)
4052 * i40e_find_vlan_filter is not used here, to reduce the loop time,
4053 * although the code looks more complex.
4055 if (num < vsi->vlan_num)
4056 return I40E_ERR_PARAM;
4059 for (j = 0; j < I40E_VFTA_SIZE; j++) {
4061 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
4062 if (vsi->vfta[j] & (1 << k)) {
4064 PMD_DRV_LOG(ERR, "vlan number "
4066 return I40E_ERR_PARAM;
4068 (void)rte_memcpy(&mv_f[i].macaddr,
4069 addr, ETH_ADDR_LEN);
4071 j * I40E_UINT32_BIT_SIZE + k;
4077 return I40E_SUCCESS;
4081 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
4082 struct i40e_macvlan_filter *mv_f,
4087 struct i40e_mac_filter *f;
4089 if (num < vsi->mac_num)
4090 return I40E_ERR_PARAM;
4092 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4094 PMD_DRV_LOG(ERR, "buffer number does not match");
4095 return I40E_ERR_PARAM;
4097 (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
4099 mv_f[i].vlan_id = vlan;
4100 mv_f[i].filter_type = f->mac_info.filter_type;
4104 return I40E_SUCCESS;
4108 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
4111 struct i40e_mac_filter *f;
4112 struct i40e_macvlan_filter *mv_f;
4113 int ret = I40E_SUCCESS;
4115 if (vsi == NULL || vsi->mac_num == 0)
4116 return I40E_ERR_PARAM;
4118 /* Case where no VLAN is set */
4119 if (vsi->vlan_num == 0)
4122 num = vsi->mac_num * vsi->vlan_num;
4124 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
4126 PMD_DRV_LOG(ERR, "failed to allocate memory");
4127 return I40E_ERR_NO_MEMORY;
4131 if (vsi->vlan_num == 0) {
4132 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4133 (void)rte_memcpy(&mv_f[i].macaddr,
4134 &f->mac_info.mac_addr, ETH_ADDR_LEN);
4135 mv_f[i].vlan_id = 0;
4139 TAILQ_FOREACH(f, &vsi->mac_list, next) {
4140 ret = i40e_find_all_vlan_for_mac(vsi,&mv_f[i],
4141 vsi->vlan_num, &f->mac_info.mac_addr);
4142 if (ret != I40E_SUCCESS)
4148 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
4156 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4158 struct i40e_macvlan_filter *mv_f;
4160 int ret = I40E_SUCCESS;
4162 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
4163 return I40E_ERR_PARAM;
4165 /* If it's already set, just return */
4166 if (i40e_find_vlan_filter(vsi,vlan))
4167 return I40E_SUCCESS;
4169 mac_num = vsi->mac_num;
4172 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4173 return I40E_ERR_PARAM;
4176 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4179 PMD_DRV_LOG(ERR, "failed to allocate memory");
4180 return I40E_ERR_NO_MEMORY;
4183 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4185 if (ret != I40E_SUCCESS)
4188 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4190 if (ret != I40E_SUCCESS)
4193 i40e_set_vlan_filter(vsi, vlan, 1);
4203 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
4205 struct i40e_macvlan_filter *mv_f;
4207 int ret = I40E_SUCCESS;
4210 * Vlan 0 is the generic filter for untagged packets
4211 * and can't be removed.
4213 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
4214 return I40E_ERR_PARAM;
4216 /* If it can't be found, just return */
4217 if (!i40e_find_vlan_filter(vsi, vlan))
4218 return I40E_ERR_PARAM;
4220 mac_num = vsi->mac_num;
4223 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
4224 return I40E_ERR_PARAM;
4227 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
4230 PMD_DRV_LOG(ERR, "failed to allocate memory");
4231 return I40E_ERR_NO_MEMORY;
4234 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
4236 if (ret != I40E_SUCCESS)
4239 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
4241 if (ret != I40E_SUCCESS)
4244 /* This is the last VLAN to remove; replace all MAC filters with VLAN 0 */
4245 if (vsi->vlan_num == 1) {
4246 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
4247 if (ret != I40E_SUCCESS)
4250 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
4251 if (ret != I40E_SUCCESS)
4255 i40e_set_vlan_filter(vsi, vlan, 0);
4265 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
4267 struct i40e_mac_filter *f;
4268 struct i40e_macvlan_filter *mv_f;
4269 int i, vlan_num = 0;
4270 int ret = I40E_SUCCESS;
4272 /* If the filter has already been configured, just return */
4273 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
4275 return I40E_SUCCESS;
4276 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
4277 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
4280 * If vlan_num is 0, this is the first time a MAC is added;
4281 * set the mask for vlan_id 0.
4283 if (vsi->vlan_num == 0) {
4284 i40e_set_vlan_filter(vsi, 0, 1);
4287 vlan_num = vsi->vlan_num;
4288 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
4289 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
4292 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
4294 PMD_DRV_LOG(ERR, "failed to allocate memory");
4295 return I40E_ERR_NO_MEMORY;
4298 for (i = 0; i < vlan_num; i++) {
4299 mv_f[i].filter_type = mac_filter->filter_type;
4300 (void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
4304 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
4305 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
4306 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
4307 &mac_filter->mac_addr);
4308 if (ret != I40E_SUCCESS)
4312 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
4313 if (ret != I40E_SUCCESS)
4316 /* Add the mac addr into mac list */
4317 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
4319 PMD_DRV_LOG(ERR, "failed to allocate memory");
4320 ret = I40E_ERR_NO_MEMORY;
4323 (void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
4325 f->mac_info.filter_type = mac_filter->filter_type;
4326 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
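/*
 * Remove a MAC filter from the VSI. Every MAC/VLAN combination that was
 * programmed for this address is removed, and the entry is dropped from
 * the VSI's MAC list.
 */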
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;
	/* If we can't find it, return an error */
	f = i40e_find_mac_filter(vsi, addr);
		return I40E_ERR_PARAM;
	vlan_num = vsi->vlan_num;
	filter_type = f->mac_info.filter_type;
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
	    filter_type == RTE_MACVLAN_HASH_MATCH) {
		if (vlan_num == 0) {
			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
			return I40E_ERR_PARAM;
	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
		   filter_type == RTE_MAC_HASH_MATCH)
	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = filter_type;
		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
	    filter_type == RTE_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
		if (ret != I40E_SUCCESS)
	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
	/* Remove the MAC address from the MAC list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
/* Configure hash enable flags for RSS */
i40e_config_hena(uint64_t flags)
	if (flags & ETH_RSS_NONF_IPV4_UDP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	if (flags & ETH_RSS_NONF_IPV4_TCP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	if (flags & ETH_RSS_NONF_IPV4_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	if (flags & ETH_RSS_NONF_IPV4_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	if (flags & ETH_RSS_FRAG_IPV4)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
	if (flags & ETH_RSS_NONF_IPV6_UDP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
	if (flags & ETH_RSS_NONF_IPV6_TCP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
	if (flags & ETH_RSS_NONF_IPV6_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
	if (flags & ETH_RSS_NONF_IPV6_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
	if (flags & ETH_RSS_FRAG_IPV6)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
	if (flags & ETH_RSS_L2_PAYLOAD)
		hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;
/* Parse the hash enable flags */
i40e_parse_hena(uint64_t flags)
	uint64_t rss_hf = 0;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
		rss_hf |= ETH_RSS_NONF_IPV4_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
		rss_hf |= ETH_RSS_NONF_IPV4_TCP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
		rss_hf |= ETH_RSS_NONF_IPV4_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
		rss_hf |= ETH_RSS_NONF_IPV4_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
		rss_hf |= ETH_RSS_FRAG_IPV4;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
		rss_hf |= ETH_RSS_NONF_IPV6_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
		rss_hf |= ETH_RSS_NONF_IPV6_TCP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
		rss_hf |= ETH_RSS_NONF_IPV6_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
		rss_hf |= ETH_RSS_NONF_IPV6_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
		rss_hf |= ETH_RSS_FRAG_IPV6;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
		rss_hf |= ETH_RSS_L2_PAYLOAD;
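/*
 * Disable RSS on the PF by clearing all protocol bits in the hash enable
 * (HENA) registers. HENA is a 64-bit value split across two 32-bit
 * registers, so it is read, masked and written back in two halves.
 */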
i40e_pf_disable_rss(struct i40e_pf *pf)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
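/*
 * Program the RSS configuration into hardware: write the hash key into the
 * PFQF_HKEY registers when a key of sufficient length is supplied, then
 * update the HENA protocol-enable bits from the requested rss_hf flags.
 */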
i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
	uint8_t hash_key_len;
	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	if (hash_key != NULL && hash_key_len >=
	    (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Fill in RSS hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
	rss_hf = rss_conf->rss_hf;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
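/*
 * rss_hash_update ethdev callback: update the RSS hash key and the set of
 * enabled protocols. Requests that would turn RSS on while it is disabled,
 * or off while it is enabled, are not handled by this callback.
 */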
i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
		return 0; /* Nothing to do */
	if (rss_hf == 0) /* Disable RSS */
	return i40e_hw_rss_hash_set(hw, rss_conf);
i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
	if (hash_key != NULL) {
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
		rss_conf->rss_key_len = i * sizeof(uint32_t);
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	rss_conf->rss_hf = i40e_parse_hena(hena);
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
	switch (filter_type) {
	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
		break;
	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_IMAC_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
		break;
	case ETH_TUNNEL_FILTER_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
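/*
 * Add or remove a cloud (tunnel) filter through the admin queue. The
 * rte_eth tunnel filter configuration is translated into an
 * i40e_aqc_add_remove_cloud_filters_element_data element before being
 * submitted for the main VSI.
 */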
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			   struct rte_eth_tunnel_filter_conf *tunnel_filter,
	uint8_t tun_type = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
	pfilter = cld_filter;
	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
			sizeof(struct ether_addr));
	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
			sizeof(struct ether_addr));
	pfilter->inner_vlan = tunnel_filter->inner_vlan;
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v4.data));
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v6.data));
	/* check tunnel type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
		break;
	default:
		/* Other tunnel types are not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
		rte_free(cld_filter);
	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	pfilter->tenant_id = tunnel_filter->tenant_id;
	pfilter->queue_number = tunnel_filter->queue_id;
	ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
	rte_free(cld_filter);
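/*
 * Look up a UDP port in the PF's VXLAN port table and return its index.
 * Callers also look up port 0 to find a free slot in the table.
 */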
i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	idx = i40e_get_vxlan_port_idx(pf, port);
	/* Check if port already exists */
		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
	/* Now check if there is space to add the new port */
	idx = i40e_get_vxlan_port_idx(pf, 0);
		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
			    "not adding port %d", port);
	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
		    port, filter_index);
	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[idx] = port;
	pf->vxlan_bitmap |= (1 << idx);
	if (!(pf->flags & I40E_FLAG_VXLAN))
		pf->flags |= I40E_FLAG_VXLAN;
i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	if (!(pf->flags & I40E_FLAG_VXLAN)) {
		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
	idx = i40e_get_vxlan_port_idx(pf, port);
		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
	pf->vxlan_ports[idx] = 0;
	pf->vxlan_bitmap &= ~(1 << idx);
	if (!pf->vxlan_bitmap)
		pf->flags &= ~I40E_FLAG_VXLAN;
/* Add UDP tunneling port */
i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	if (udp_tunnel == NULL)
	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
/* Remove UDP tunneling port */
i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	if (udp_tunnel == NULL)
	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported yet.");
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
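/*
 * Configure RSS for the PF: spread the RX queues round-robin over the hash
 * lookup table (four 8-bit entries are packed into each 32-bit HLUT
 * register), then program the hash key and enabled protocols. A random
 * default key is generated when the application does not supply one of
 * sufficient length.
 */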
i40e_pf_config_rss(struct i40e_pf *pf)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	uint16_t j, num = i40e_align_floor(pf->dev_data->nb_rx_queues);
	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40e_pf_disable_rss(pf);
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
	    (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Calculate the default hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			rss_key_default[i] = (uint32_t)rte_rand();
		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
	return i40e_hw_rss_hash_set(hw, &rss_conf);
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
			       struct rte_eth_tunnel_filter_conf *filter)
	if (pf == NULL || filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
	    (is_zero_ether_addr(filter->outer_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
	    (is_zero_ether_addr(filter->inner_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
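/*
 * Handle a RTE_ETH_FILTER_TUNNEL operation coming from the generic filter
 * control API: validate the parameters and dispatch to
 * i40e_dev_tunnel_filter_set() for add/delete.
 */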
i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
	struct rte_eth_tunnel_filter_conf *filter;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = I40E_SUCCESS;
	filter = (struct rte_eth_tunnel_filter_conf *)(arg);
	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
		return I40E_ERR_PARAM;
	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		if (!(pf->flags & I40E_FLAG_VXLAN))
			ret = I40E_NOT_SUPPORTED;
		break;
	case RTE_ETH_FILTER_ADD:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = I40E_ERR_PARAM;
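/*
 * Configure the PF's multi-queue RX mode: when SR-IOV is not active, RSS is
 * either set up or explicitly disabled according to the configured
 * rxmode.mq_mode.
 */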
i40e_pf_config_mq_rx(struct i40e_pf *pf)
	if (!pf->dev_data->sriov.active) {
		switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			i40e_pf_config_rss(pf);
			break;
		default:
			i40e_pf_disable_rss(pf);
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
	switch (filter_type) {
	case RTE_ETH_FILTER_MACVLAN:
		ret = i40e_mac_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",