/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_alarm.h>
#include <rte_dev.h>
#include <rte_eth_ctrl.h>

#include "i40e_logs.h"
#include "base/i40e_prototype.h"
#include "base/i40e_adminq_cmd.h"
#include "base/i40e_type.h"
#include "base/i40e_register.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
/* Maximum number of MAC addresses */
#define I40E_NUM_MACADDR_MAX       64
#define I40E_CLEAR_PXE_WAIT_MS     200

/* Maximum number of capability elements */
#define I40E_MAX_CAP_ELE_NUM       128

/* Wait count and interval */
#define I40E_CHK_Q_ENA_COUNT       1000
#define I40E_CHK_Q_ENA_INTERVAL_US 1000

/* Maximum number of VSIs */
#define I40E_MAX_NUM_VSIS          (384UL)

/* Default queue interrupt throttling time in microseconds */
#define I40E_ITR_INDEX_DEFAULT          0
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */

#define I40E_PRE_TX_Q_CFG_WAIT_US       10 /* 10 us */
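/*
 * The ITR interval values above are in microseconds; the hardware
 * register itself counts in 2 us units (see i40e_calc_itr_interval()
 * below), so e.g. the 32 us default is programmed as 32 / 2 = 16.
 */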
/* Mask of PF interrupt causes */
#define I40E_PFINT_ICR0_ENA_MASK ( \
		I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_GRST_MASK | \
		I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
		I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
		I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
		I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
		I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
		I40E_PFINT_ICR0_ENA_VFLR_MASK | \
		I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
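/*
 * Every cause enabled above (admin queue, VFLR, link state change, and
 * the various error events) is serviced by the misc interrupt on
 * vector 0; the mask is presumably written to I40E_PFINT_ICR0_ENA by
 * i40e_pf_config_irq0() below.
 */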
#define I40E_FLOW_TYPES ( \
	(1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1UL << RTE_ETH_FLOW_L2_PAYLOAD))
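/*
 * I40E_FLOW_TYPES is the bitmap of RTE flow types this driver supports.
 * A sketch of the intended membership test (flow_type being an
 * RTE_ETH_FLOW_* value):
 *
 *	if (!(I40E_FLOW_TYPES & (1UL << flow_type)))
 *		return -EINVAL; /* flow type not handled by i40e */
 */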
#define I40E_PTP_40GB_INCVAL  0x0199999999ULL
#define I40E_PTP_10GB_INCVAL  0x0333333333ULL
#define I40E_PTP_1GB_INCVAL   0x2000000000ULL
#define I40E_PRTTSYN_TSYNENA  0x80000000
#define I40E_PRTTSYN_TSYNTYPE 0x0e000000
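/*
 * The PTP INCVAL constants appear to be per-link-speed increments for
 * the PRTTSYN time counter: relative to the 40G value, the 10G value
 * is 2x and the 1G value is 20x, i.e. the slower the link, the larger
 * the per-cycle increment.
 */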
static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
static void i40e_dev_stop(struct rte_eth_dev *dev);
static void i40e_dev_close(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
static void i40e_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
					    uint16_t queue_id,
					    uint8_t stat_idx,
					    uint8_t is_rx);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id,
				int on);
static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid);
static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
				      uint16_t queue,
				      int on);
static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
static int i40e_dev_led_on(struct rte_eth_dev *dev);
static int i40e_dev_led_off(struct rte_eth_dev *dev);
static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
			      struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				       struct rte_eth_pfc_conf *pfc_conf);
static void i40e_macaddr_add(struct rte_eth_dev *dev,
			     struct ether_addr *mac_addr,
			     uint32_t index,
			     uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_reta_entry64 *reta_conf,
				    uint16_t reta_size);
static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
		bool offset_loaded, uint64_t *offset, uint64_t *stat);
static void i40e_stat_update_48(struct i40e_hw *hw,
				uint32_t hireg,
				uint32_t loreg,
				bool offset_loaded,
				uint64_t *offset,
				uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw);
static void i40e_dev_interrupt_handler(
		__rte_unused struct rte_intr_handle *handle, void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
			      uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
			      uint32_t base);
static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
			       uint16_t num);
static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
static int i40e_veb_release(struct i40e_veb *veb);
static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
				       struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
					     struct i40e_macvlan_filter *mv_f,
					     int num,
					     uint16_t vlan);
static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
				    struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				      struct rte_eth_rss_conf *rss_conf);
static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
				   struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
				   struct rte_eth_udp_tunnel *udp_tunnel);
static int i40e_ethertype_filter_set(struct i40e_pf *pf,
				     struct rte_eth_ethertype_filter *filter,
				     bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
					enum rte_filter_op filter_op,
					void *arg);
static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
				enum rte_filter_type filter_type,
				enum rte_filter_op filter_op,
				void *arg);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct i40e_hw *hw);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
				struct rte_eth_mirror_conf *mirror_conf,
				uint8_t sw_id, uint8_t on);
static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);

static int i40e_timesync_enable(struct rte_eth_dev *dev);
static int i40e_timesync_disable(struct rte_eth_dev *dev);
static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp,
					   uint32_t flags);
static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					   struct timespec *timestamp);
static const struct rte_pci_id pci_id_i40e_map[] = {
#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops i40e_eth_dev_ops = {
	.dev_configure = i40e_dev_configure,
	.dev_start = i40e_dev_start,
	.dev_stop = i40e_dev_stop,
	.dev_close = i40e_dev_close,
	.promiscuous_enable = i40e_dev_promiscuous_enable,
	.promiscuous_disable = i40e_dev_promiscuous_disable,
	.allmulticast_enable = i40e_dev_allmulticast_enable,
	.allmulticast_disable = i40e_dev_allmulticast_disable,
	.dev_set_link_up = i40e_dev_set_link_up,
	.dev_set_link_down = i40e_dev_set_link_down,
	.link_update = i40e_dev_link_update,
	.stats_get = i40e_dev_stats_get,
	.stats_reset = i40e_dev_stats_reset,
	.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
	.dev_infos_get = i40e_dev_info_get,
	.vlan_filter_set = i40e_vlan_filter_set,
	.vlan_tpid_set = i40e_vlan_tpid_set,
	.vlan_offload_set = i40e_vlan_offload_set,
	.vlan_strip_queue_set = i40e_vlan_strip_queue_set,
	.vlan_pvid_set = i40e_vlan_pvid_set,
	.rx_queue_start = i40e_dev_rx_queue_start,
	.rx_queue_stop = i40e_dev_rx_queue_stop,
	.tx_queue_start = i40e_dev_tx_queue_start,
	.tx_queue_stop = i40e_dev_tx_queue_stop,
	.rx_queue_setup = i40e_dev_rx_queue_setup,
	.rx_queue_release = i40e_dev_rx_queue_release,
	.rx_queue_count = i40e_dev_rx_queue_count,
	.rx_descriptor_done = i40e_dev_rx_descriptor_done,
	.tx_queue_setup = i40e_dev_tx_queue_setup,
	.tx_queue_release = i40e_dev_tx_queue_release,
	.dev_led_on = i40e_dev_led_on,
	.dev_led_off = i40e_dev_led_off,
	.flow_ctrl_set = i40e_flow_ctrl_set,
	.priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
	.mac_addr_add = i40e_macaddr_add,
	.mac_addr_remove = i40e_macaddr_remove,
	.reta_update = i40e_dev_rss_reta_update,
	.reta_query = i40e_dev_rss_reta_query,
	.rss_hash_update = i40e_dev_rss_hash_update,
	.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
	.udp_tunnel_add = i40e_dev_udp_tunnel_add,
	.udp_tunnel_del = i40e_dev_udp_tunnel_del,
	.filter_ctrl = i40e_dev_filter_ctrl,
	.mirror_rule_set = i40e_mirror_rule_set,
	.mirror_rule_reset = i40e_mirror_rule_reset,
	.timesync_enable = i40e_timesync_enable,
	.timesync_disable = i40e_timesync_disable,
	.timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
};
static struct eth_driver rte_i40e_pmd = {
	.pci_drv = {
		.name = "rte_i40e_pmd",
		.id_table = pci_id_i40e_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	},
	.eth_dev_init = eth_i40e_dev_init,
	.dev_private_size = sizeof(struct i40e_adapter),
};
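/*
 * i40e_align_floor(n) returns the largest power of two that is not
 * greater than n, e.g. i40e_align_floor(48) == 32. __builtin_clz() is
 * undefined for 0, so n == 0 must be special-cased.
 */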
static inline int
i40e_align_floor(int n)
{
	if (n == 0)
		return 0;
	return (1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)));
}
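/*
 * The two helpers below copy struct rte_eth_link, which fits in 64 bits,
 * with rte_atomic64_cmpset() so that readers and writers never observe
 * a torn link status; they return -1 when the compare-and-set fails.
 */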
static inline int
rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static inline int
rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				      struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI i40e devices.
 */
static int
rte_i40e_pmd_init(const char *name __rte_unused,
		  const char *params __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	rte_eth_driver_register(&rte_i40e_pmd);

	return 0;
}
static struct rte_driver rte_i40e_driver = {
	.type = PMD_PDEV,
	.init = rte_i40e_pmd_init,
};

PMD_REGISTER_DRIVER(rte_i40e_driver);
/*
 * Initialize registers for flexible payload, which should be set by NVM.
 * This should be removed from code once it is fixed in NVM.
 */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i)    (0x00268900 + ((_i) * 4))
#endif
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i)    (0x00268C80 + ((_i) * 4))
#endif
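/*
 * Both macros index 32-bit registers, so for example I40E_GLQF_ORT(18)
 * expands to 0x00268900 + 18 * 4 = 0x00268948.
 */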
static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
{
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(30), 0x0000002B);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);

	/* GLQF_PIT Registers */
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
	I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
}
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi;
	int ret;
	uint32_t len;
	uint8_t aq_fail = 0;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &i40e_eth_dev_ops;
	dev->rx_pkt_burst = i40e_recv_pkts;
	dev->tx_pkt_burst = i40e_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
		return 0;
	}
	pci_dev = dev->pci_dev;
	pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;

	hw->back = I40E_PF_TO_ADAPTER(pf);
	hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
	if (!hw->hw_addr) {
		PMD_INIT_LOG(ERR, "Hardware is not available, "
			     "as address is NULL");
		return -ENODEV;
	}

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	/* Make sure all is clean before doing PF reset */
	i40e_clear_hw(hw);

	/* Initialize the hardware */
	i40e_hw_init(hw);

	/* Reset here to make sure all is clean for each PF */
	ret = i40e_pf_reset(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
		return ret;
	}

	/* Initialize the shared code (base driver) */
	ret = i40e_init_shared_code(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
		return ret;
	}

	/*
	 * To work around the NVM issue, initialize registers
	 * for flexible payload by software.
	 * It should be removed once issues are fixed in NVM.
	 */
	i40e_flex_payload_reg_init(hw);

	/* Initialize the parameters for adminq */
	i40e_init_adminq_parameter(hw);
	ret = i40e_init_adminq(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
		return -EIO;
	}
	PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
		     hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
		     hw->aq.api_maj_ver, hw->aq.api_min_ver,
		     ((hw->nvm.version >> 12) & 0xf),
		     ((hw->nvm.version >> 4) & 0xff),
		     (hw->nvm.version & 0xf), hw->nvm.eetrack);

	ret = i40e_aq_stop_lldp(hw, true, NULL);
	if (ret != I40E_SUCCESS) /* Its failure can be ignored */
		PMD_INIT_LOG(INFO, "Failed to stop lldp");

	i40e_clear_pxe_mode(hw);
	/*
	 * On X710, performance numbers fall far short of expectations on
	 * recent firmware versions, and the fix may not be integrated in
	 * the next firmware release. A workaround in the software driver
	 * is therefore needed: it modifies the initial values of 3
	 * internal-only registers. The workaround can be removed once the
	 * issue is fixed in firmware.
	 */
	i40e_configure_registers(hw);
	/* Get hw capabilities */
	ret = i40e_get_cap(hw);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
		goto err_get_capabilities;
	}

	/* Initialize parameters for PF */
	ret = i40e_pf_parameter_init(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
		goto err_parameter_init;
	}

	/* Initialize the queue management */
	ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init queue pool");
		goto err_qp_pool_init;
	}
	ret = i40e_res_pool_init(&pf->msix_pool, 1,
				 hw->func_caps.num_msix_vectors - 1);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	/* Initialize lan hmc */
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
		goto err_init_lan_hmc;
	}

	/* Configure lan hmc */
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
		goto err_configure_lan_hmc;
	}

	/* Get and check the mac address */
	i40e_get_mac_addr(hw, hw->mac.addr);
	if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
		PMD_INIT_LOG(ERR, "mac address is not valid");
		ret = -EIO;
		goto err_get_mac_addr;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *) hw->mac.addr,
			(struct ether_addr *) hw->mac.perm_addr);

	/* Disable flow control */
	hw->fc.requested_mode = I40E_FC_NONE;
	i40e_set_fc(hw, &aq_fail, TRUE);

	/* PF setup, which includes VSI setup */
	ret = i40e_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
		goto err_setup_pf_switch;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	i40e_vsi_config_double_vlan(vsi, FALSE);

	if (!vsi->max_macaddrs)
		len = ETHER_ADDR_LEN;
	else
		len = ETHER_ADDR_LEN * vsi->max_macaddrs;

	/* Should be after VSI initialized */
	dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory "
			     "for storing mac address");
		goto err_mac_alloc;
	}
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&dev->data->mac_addrs[0]);

	/* initialize pf host driver to setup SRIOV resource if applicable */
	i40e_pf_host_init(dev);

	/* register callback func to eal lib */
	rte_intr_callback_register(&(pci_dev->intr_handle),
				   i40e_dev_interrupt_handler, (void *)dev);

	/* configure and enable device interrupt */
	i40e_pf_config_irq0(hw);
	i40e_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(&(pci_dev->intr_handle));

	/* initialize mirror rule list */
	TAILQ_INIT(&pf->mirror_list);

	return 0;

err_mac_alloc:
	i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
err_get_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	i40e_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	i40e_res_pool_destroy(&pf->qp_pool);
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
	(void)i40e_shutdown_adminq(hw);

	return ret;
}
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	int ret;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
		ret = i40e_fdir_setup(pf);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to setup flow director.");
			return -ENOTSUP;
		}
		ret = i40e_fdir_configure(dev);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to configure fdir.");
			goto err;
		}
	} else
		i40e_fdir_teardown(pf);

	ret = i40e_dev_init_vlan(dev);
	if (ret < 0)
		goto err;

	/*
	 * VMDQ setup has to stay out of i40e_pf_config_mq_rx(), because
	 * VMDQ and RSS have different configuration requirements. The
	 * general PMD call sequence is NIC init, configure,
	 * rx/tx_queue_setup and dev_start. rx/tx_queue_setup() looks up
	 * the VSI a given queue belongs to when VMDQ is applicable, so
	 * VMDQ must be configured before rx/tx_queue_setup(), which makes
	 * this function the right place for vmdq_setup. RSS, in contrast,
	 * needs the actual number of configured RX queues, which is only
	 * known after rx_queue_setup(), so dev_start() is the right place
	 * for RSS setup.
	 */
	if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		ret = i40e_vmdq_setup(dev);
		if (ret)
			goto err;
	}

	return 0;
err:
	i40e_fdir_teardown(pf);
	return ret;
}
static void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	int i;

	for (i = 0; i < vsi->nb_qps; i++) {
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
	}

	if (vsi->type != I40E_VSI_SRIOV) {
		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1), 0);
		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), 0);
	} else {
		uint32_t reg;

		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), 0);
	}
	I40E_WRITE_FLUSH(hw);
}
static inline uint16_t
i40e_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX)
		interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count, as writing each 1 represents 2 us */
	return (uint16_t)(interval / 2);
}
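/*
 * Worked example: a negative or out-of-range request falls back to the
 * 32 us default and yields a register count of 32 / 2 = 16; the maximum
 * of 8160 us yields 8160 / 2 = 4080.
 */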
static void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint32_t val;
	int i;

	for (i = 0; i < vsi->nb_qps; i++)
		I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);

	/* Bind all RX queues to allocated MSIX interrupt */
	for (i = 0; i < vsi->nb_qps; i++) {
		val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			I40E_QINT_RQCTL_ITR_INDX_MASK |
			((vsi->base_queue + i + 1) <<
			I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			(0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		if (i == vsi->nb_qps - 1)
			val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), val);
	}

	/* Write first RX queue to Link list register as the head element */
	if (vsi->type != I40E_VSI_SRIOV) {
		uint16_t interval =
			i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);

		I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
			       (vsi->base_queue <<
				I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			       (0x0 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		I40E_WRITE_REG(hw, I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
				msix_vect - 1), interval);

#ifndef I40E_GLINT_CTL
#define I40E_GLINT_CTL                     0x0003F800
#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK 0x4
#endif
		/* Disable auto-mask on enabling of all non-zero interrupts */
		I40E_WRITE_REG(hw, I40E_GLINT_CTL,
			       I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK);
	} else {
		uint32_t reg;

		/* num_msix_vectors_vf includes irq0; subtract it */
		reg = (hw->func_caps.num_msix_vectors_vf - 1) *
			vsi->user_param + (msix_vect - 1);

		I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg), (vsi->base_queue <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			       (0x0 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
	}

	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	uint16_t interval = i40e_calc_itr_interval(
				RTE_LIBRTE_I40E_ITR_INTERVAL);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1),
		       I40E_PFINT_DYN_CTLN_INTENA_MASK |
		       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		       (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
		       (interval << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);

	I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(vsi->msix_intr - 1), 0);
	I40E_WRITE_FLUSH(hw);
}
static inline uint8_t
i40e_parse_link_speed(uint16_t eth_link_speed)
{
	uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;

	switch (eth_link_speed) {
	case ETH_LINK_SPEED_40G:
		link_speed = I40E_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_20G:
		link_speed = I40E_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_10G:
		link_speed = I40E_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_1000:
		link_speed = I40E_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_100:
		link_speed = I40E_LINK_SPEED_100MB;
		break;
	}

	return link_speed;
}
static int
i40e_phy_conf_link(struct i40e_hw *hw, uint8_t abilities, uint8_t force_speed)
{
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp phy_ab;
	struct i40e_aq_set_phy_config phy_conf;
	const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
			I40E_AQ_PHY_FLAG_PAUSE_RX |
			I40E_AQ_PHY_FLAG_LOW_POWER;
	const uint8_t advt = I40E_LINK_SPEED_40GB |
			I40E_LINK_SPEED_10GB |
			I40E_LINK_SPEED_1GB |
			I40E_LINK_SPEED_100MB;

	/* Skip it on 40G interfaces, as a workaround for the link issue */
	if (i40e_is_40G_device(hw->device_id))
		return I40E_SUCCESS;

	status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
					      NULL);
	if (status)
		return status;

	memset(&phy_conf, 0, sizeof(phy_conf));

	/* bits 0-2 use the values from get_phy_abilities_resp */
	abilities &= ~mask;
	abilities |= phy_ab.abilities & mask;

	/* update abilities and speed */
	if (abilities & I40E_AQ_PHY_AN_ENABLED)
		phy_conf.link_speed = advt;
	else
		phy_conf.link_speed = force_speed;

	phy_conf.abilities = abilities;

	/* use get_phy_abilities_resp value for the rest */
	phy_conf.phy_type = phy_ab.phy_type;
	phy_conf.eee_capability = phy_ab.eee_capability;
	phy_conf.eeer = phy_ab.eeer_val;
	phy_conf.low_power_ctrl = phy_ab.d3_lpan;

	PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
		    phy_ab.abilities, phy_ab.link_speed);
	PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
		    phy_conf.abilities, phy_conf.link_speed);

	status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
	if (status)
		return status;

	return I40E_SUCCESS;
}
static int
i40e_apply_link_speed(struct rte_eth_dev *dev)
{
	uint8_t speed;
	uint8_t abilities = 0;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	speed = i40e_parse_link_speed(conf->link_speed);
	abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	if (conf->link_speed == ETH_LINK_SPEED_AUTONEG)
		abilities |= I40E_AQ_PHY_AN_ENABLED;
	else
		abilities |= I40E_AQ_PHY_LINK_ENABLED;

	return i40e_phy_conf_link(hw, abilities, speed);
}
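/*
 * In short: with ETH_LINK_SPEED_AUTONEG the PHY is told to autonegotiate
 * and i40e_phy_conf_link() offers it the full advt set (40G/10G/1G/100M);
 * any other setting clears autonegotiation and forces the single parsed
 * speed.
 */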
static int
i40e_dev_start(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	int ret, i;

	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
	    (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
		PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
			     dev->data->dev_conf.link_duplex,
			     dev->data->port_id);
		return -EINVAL;
	}

	ret = i40e_dev_rxtx_init(pf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
		goto err_up;
	}

	/* Map queues with MSIX interrupt */
	i40e_vsi_queues_bind_intr(main_vsi);
	i40e_vsi_enable_queues_intr(main_vsi);

	/* Map VMDQ VSI queues with MSIX interrupt */
	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
	}

	/* enable FDIR MSIX interrupt */
	if (pf->fdir.fdir_vsi) {
		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
	}

	/* Enable all queues which have been configured */
	ret = i40e_dev_switch_queues(pf, TRUE);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to enable VSI");
		goto err_up;
	}

	/* Enable receiving broadcast packets */
	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
						true, NULL);
		if (ret != I40E_SUCCESS)
			PMD_DRV_LOG(INFO, "Failed to set VSI broadcast");
	}

	/* Apply link configure */
	ret = i40e_apply_link_speed(dev);
	if (I40E_SUCCESS != ret) {
		PMD_DRV_LOG(ERR, "Fail to apply link setting");
		goto err_up;
	}

	return I40E_SUCCESS;

err_up:
	i40e_dev_switch_queues(pf, FALSE);
	i40e_dev_clear_queues(dev);

	return ret;
}
static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *main_vsi = pf->main_vsi;
	struct i40e_mirror_rule *p_mirror;
	int i;

	/* Disable all queues */
	i40e_dev_switch_queues(pf, FALSE);

	/* un-map queues with interrupt registers */
	i40e_vsi_disable_queues_intr(main_vsi);
	i40e_vsi_queues_unbind_intr(main_vsi);

	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
	}

	if (pf->fdir.fdir_vsi) {
		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
	}
	/* Clear all queues and release memory */
	i40e_dev_clear_queues(dev);

	/* Set link down */
	i40e_dev_set_link_down(dev);

	/* Remove all mirror rules */
	while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
		TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
		rte_free(p_mirror);
	}
	pf->nb_mirror_rule = 0;
}
static void
i40e_dev_close(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	PMD_INIT_FUNC_TRACE();

	i40e_dev_stop(dev);

	/* Disable interrupt */
	i40e_pf_disable_irq0(hw);
	rte_intr_disable(&(dev->pci_dev->intr_handle));

	/* shutdown and destroy the HMC */
	i40e_shutdown_lan_hmc(hw);

	/* release all the existing VSIs and VEBs */
	i40e_fdir_teardown(pf);
	i40e_vsi_release(pf->main_vsi);

	/* shutdown the adminq */
	i40e_aq_queue_shutdown(hw, true);
	i40e_shutdown_adminq(hw);

	i40e_res_pool_destroy(&pf->qp_pool);
	i40e_res_pool_destroy(&pf->msix_pool);

	/* force a PF reset to clean anything leftover */
	reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
	I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
		       (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
	I40E_WRITE_FLUSH(hw);
}
static void
i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       TRUE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}
static void
i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int status;

	status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
						     FALSE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
						       FALSE, NULL);
	if (status != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}
static void
i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
}
static void
i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	int ret;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
						    vsi->seid, FALSE, NULL);
	if (ret != I40E_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
}
/*
 * Set device link up.
 */
static int
i40e_dev_set_link_up(struct rte_eth_dev *dev)
{
	/* re-apply link speed setting */
	return i40e_apply_link_speed(dev);
}
/*
 * Set device link down.
 */
static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
	uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
	uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return i40e_phy_conf_link(hw, abilities, speed);
}
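/*
 * "Link down" is implemented as a PHY reconfiguration with the
 * I40E_AQ_PHY_LINK_ENABLED ability cleared (only ATOMIC_LINK is kept)
 * and an unknown speed, which leaves the link disabled.
 */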
int
i40e_dev_link_update(struct rte_eth_dev *dev,
		     int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned rep_cnt = MAX_REPEAT_TIME;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	rte_i40e_dev_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
		if (status != I40E_SUCCESS) {
			link.link_speed = ETH_LINK_SPEED_100;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
		if (!wait_to_complete)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (!link.link_status && rep_cnt--);

	if (!link.link_status)
		goto out;

	/* i40e uses full duplex only */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case I40E_LINK_SPEED_100MB:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	case I40E_LINK_SPEED_1GB:
		link.link_speed = ETH_LINK_SPEED_1000;
		break;
	case I40E_LINK_SPEED_10GB:
		link.link_speed = ETH_LINK_SPEED_10G;
		break;
	case I40E_LINK_SPEED_20GB:
		link.link_speed = ETH_LINK_SPEED_20G;
		break;
	case I40E_LINK_SPEED_40GB:
		link.link_speed = ETH_LINK_SPEED_40G;
		break;
	default:
		link.link_speed = ETH_LINK_SPEED_100;
		break;
	}

out:
	rte_i40e_dev_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
/* Get all the statistics of a VSI */
void
i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
	struct i40e_eth_stats *nes = &vsi->eth_stats;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);

	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
			    vsi->offset_loaded, &oes->rx_bytes,
			    &nes->rx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
			    vsi->offset_loaded, &oes->rx_unicast,
			    &nes->rx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
			    vsi->offset_loaded, &oes->rx_multicast,
			    &nes->rx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
			    vsi->offset_loaded, &oes->rx_broadcast,
			    &nes->rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
			    &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
			    &oes->rx_unknown_protocol,
			    &nes->rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
			    vsi->offset_loaded, &oes->tx_bytes,
			    &nes->tx_bytes);
	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
			    vsi->offset_loaded, &oes->tx_unicast,
			    &nes->tx_unicast);
	i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
			    vsi->offset_loaded, &oes->tx_multicast,
			    &nes->tx_multicast);
	i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
			    vsi->offset_loaded, &oes->tx_broadcast,
			    &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
			    &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
		    vsi->vsi_id);
}
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct i40e_eth_stats */
	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
			    I40E_GLPRT_GORCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_bytes,
			    &ns->eth.rx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
			    I40E_GLPRT_UPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_unicast,
			    &ns->eth.rx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
			    I40E_GLPRT_MPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_multicast,
			    &ns->eth.rx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
			    I40E_GLPRT_BPRCL(hw->port),
			    pf->offset_loaded, &os->eth.rx_broadcast,
			    &ns->eth.rx_broadcast);
	i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
			    pf->offset_loaded, &os->eth.rx_discards,
			    &ns->eth.rx_discards);
	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
			    pf->offset_loaded,
			    &os->eth.rx_unknown_protocol,
			    &ns->eth.rx_unknown_protocol);
	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
			    I40E_GLPRT_GOTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_bytes,
			    &ns->eth.tx_bytes);
	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
			    I40E_GLPRT_UPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_unicast,
			    &ns->eth.tx_unicast);
	i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
			    I40E_GLPRT_MPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_multicast,
			    &ns->eth.tx_multicast);
	i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
			    I40E_GLPRT_BPTCL(hw->port),
			    pf->offset_loaded, &os->eth.tx_broadcast,
			    &ns->eth.tx_broadcast);
	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
			    pf->offset_loaded, &os->tx_dropped_link_down,
			    &ns->tx_dropped_link_down);
	i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
			    pf->offset_loaded, &os->crc_errors,
			    &ns->crc_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
			    pf->offset_loaded, &os->illegal_bytes,
			    &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
			    pf->offset_loaded, &os->mac_local_faults,
			    &ns->mac_local_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
			    pf->offset_loaded, &os->mac_remote_faults,
			    &ns->mac_remote_faults);
	i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
			    pf->offset_loaded, &os->rx_length_errors,
			    &ns->rx_length_errors);
	i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
			    pf->offset_loaded, &os->link_xon_rx,
			    &ns->link_xon_rx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_rx,
			    &ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_rx[i],
				    &ns->priority_xon_rx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_rx[i],
				    &ns->priority_xoff_rx[i]);
	}
	i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
			    pf->offset_loaded, &os->link_xon_tx,
			    &ns->link_xon_tx);
	i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			    pf->offset_loaded, &os->link_xoff_tx,
			    &ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_tx[i],
				    &ns->priority_xon_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xoff_tx[i],
				    &ns->priority_xoff_tx[i]);
		i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				    pf->offset_loaded,
				    &os->priority_xon_2_xoff[i],
				    &ns->priority_xon_2_xoff[i]);
	}
	i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
			    I40E_GLPRT_PRC64L(hw->port),
			    pf->offset_loaded, &os->rx_size_64,
			    &ns->rx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
			    I40E_GLPRT_PRC127L(hw->port),
			    pf->offset_loaded, &os->rx_size_127,
			    &ns->rx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
			    I40E_GLPRT_PRC255L(hw->port),
			    pf->offset_loaded, &os->rx_size_255,
			    &ns->rx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
			    I40E_GLPRT_PRC511L(hw->port),
			    pf->offset_loaded, &os->rx_size_511,
			    &ns->rx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
			    I40E_GLPRT_PRC1023L(hw->port),
			    pf->offset_loaded, &os->rx_size_1023,
			    &ns->rx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
			    I40E_GLPRT_PRC1522L(hw->port),
			    pf->offset_loaded, &os->rx_size_1522,
			    &ns->rx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
			    I40E_GLPRT_PRC9522L(hw->port),
			    pf->offset_loaded, &os->rx_size_big,
			    &ns->rx_size_big);
	i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
			    pf->offset_loaded, &os->rx_undersize,
			    &ns->rx_undersize);
	i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
			    pf->offset_loaded, &os->rx_fragments,
			    &ns->rx_fragments);
	i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
			    pf->offset_loaded, &os->rx_oversize,
			    &ns->rx_oversize);
	i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
			    pf->offset_loaded, &os->rx_jabber,
			    &ns->rx_jabber);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
			    I40E_GLPRT_PTC64L(hw->port),
			    pf->offset_loaded, &os->tx_size_64,
			    &ns->tx_size_64);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
			    I40E_GLPRT_PTC127L(hw->port),
			    pf->offset_loaded, &os->tx_size_127,
			    &ns->tx_size_127);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
			    I40E_GLPRT_PTC255L(hw->port),
			    pf->offset_loaded, &os->tx_size_255,
			    &ns->tx_size_255);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
			    I40E_GLPRT_PTC511L(hw->port),
			    pf->offset_loaded, &os->tx_size_511,
			    &ns->tx_size_511);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
			    I40E_GLPRT_PTC1023L(hw->port),
			    pf->offset_loaded, &os->tx_size_1023,
			    &ns->tx_size_1023);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
			    I40E_GLPRT_PTC1522L(hw->port),
			    pf->offset_loaded, &os->tx_size_1522,
			    &ns->tx_size_1522);
	i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
			    I40E_GLPRT_PTC9522L(hw->port),
			    pf->offset_loaded, &os->tx_size_big,
			    &ns->tx_size_big);
	i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
			    pf->offset_loaded,
			    &os->fd_sb_match, &ns->fd_sb_match);
	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	i40e_update_vsi_stats(pf->main_vsi);

	stats->ipackets = ns->eth.rx_unicast + ns->eth.rx_multicast +
						ns->eth.rx_broadcast;
	stats->opackets = ns->eth.tx_unicast + ns->eth.tx_multicast +
						ns->eth.tx_broadcast;
	stats->ibytes = ns->eth.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors;
	stats->imcasts = ns->eth.rx_multicast;
	stats->fdirmatch = ns->fd_sb_match;

	stats->ibadcrc = ns->crc_errors;
	stats->ibadlen = ns->rx_length_errors + ns->rx_undersize +
			ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
	stats->imissed = ns->eth.rx_discards;
	stats->ierrors = stats->ibadcrc + stats->ibadlen + stats->imissed;

	PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
		    ns->rx_length_errors);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
			    i, ns->priority_xon_rx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
			    i, ns->priority_xoff_rx[i]);
	}
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	for (i = 0; i < 8; i++) {
		PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
			    i, ns->priority_xon_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
			    i, ns->priority_xoff_tx[i]);
		PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
			    i, ns->priority_xon_2_xoff[i]);
	}
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
		    ns->mac_short_packet_dropped);
	PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
		    ns->checksum_error);
	PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
	PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
}
/* Reset the statistics */
static void
i40e_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* It results in reloading the start point of each counter */
	pf->offset_loaded = false;
}

static int
i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
				 __rte_unused uint16_t queue_id,
				 __rte_unused uint8_t stat_idx,
				 __rte_unused uint8_t is_rx)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = dev->pci_dev->max_vfs;
	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
	dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
						sizeof(uint32_t);
	dev_info->reta_size = pf->hash_lut_size;
	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = I40E_DEFAULT_RX_PTHRESH,
			.hthresh = I40E_DEFAULT_RX_HTHRESH,
			.wthresh = I40E_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = I40E_DEFAULT_TX_PTHRESH,
			.hthresh = I40E_DEFAULT_TX_HTHRESH,
			.wthresh = I40E_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	if (pf->flags & I40E_FLAG_VMDQ) {
		dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
		dev_info->vmdq_queue_base = dev_info->max_rx_queues;
		dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
						pf->max_nb_vmdq_vsi;
		dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
		dev_info->max_rx_queues += dev_info->vmdq_queue_num;
		dev_info->max_tx_queues += dev_info->vmdq_queue_num;
	}
}
static int
i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	PMD_INIT_FUNC_TRACE();

	if (on)
		return i40e_vsi_add_vlan(vsi, vlan_id);
	else
		return i40e_vsi_delete_vlan(vsi, vlan_id);
}
static void
i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused uint16_t tpid)
{
	PMD_INIT_FUNC_TRACE();
}
static void
i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* Enable or disable VLAN stripping */
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			i40e_vsi_config_vlan_stripping(vsi, TRUE);
		else
			i40e_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			i40e_vsi_config_double_vlan(vsi, TRUE);
		else
			i40e_vsi_config_double_vlan(vsi, FALSE);
	}
}
static void
i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
			  __rte_unused uint16_t queue,
			  __rte_unused int on)
{
	PMD_INIT_FUNC_TRACE();
}
static int
i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
	struct i40e_vsi_vlan_pvid_info info;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on)
		info.config.pvid = pvid;
	else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	return i40e_vsi_vlan_pvid_set(vsi, &info);
}
static int
i40e_dev_led_on(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode == 0)
		i40e_led_set(hw, 0xf, true); /* 0xf means LED always on */

	return 0;
}

static int
i40e_dev_led_off(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mode = i40e_led_get(hw);

	if (mode != 0)
		i40e_led_set(hw, 0, false);

	return 0;
}
static int
i40e_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
		   __rte_unused struct rte_eth_fc_conf *fc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}

static int
i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
			    __rte_unused struct rte_eth_pfc_conf *pfc_conf)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOSYS;
}
/* Add a MAC address, and update filters */
static void
i40e_macaddr_add(struct rte_eth_dev *dev,
		 struct ether_addr *mac_addr,
		 __rte_unused uint32_t index,
		 uint32_t pool)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_mac_filter_info mac_filter;
	struct i40e_vsi *vsi;
	int ret;

	/* If VMDQ not enabled or configured, return */
	if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) {
		PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
			    pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
			    pool);
		return;
	}

	if (pool > pf->nb_cfg_vmdq_vsi) {
		PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
			    pool, pf->nb_cfg_vmdq_vsi);
		return;
	}

	(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
	mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;

	if (pool == 0)
		vsi = pf->main_vsi;
	else
		vsi = pf->vmdq[pool - 1].vsi;

	ret = i40e_vsi_add_mac(vsi, &mac_filter);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
		return;
	}
}
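/*
 * Pool semantics above: pool 0 is the main VSI; pool N (N >= 1) selects
 * the VMDQ VSI pf->vmdq[N - 1].vsi, which is why the pool number is
 * validated against pf->nb_cfg_vmdq_vsi first.
 */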
/* Remove a MAC address, and update filters */
static void
i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_vsi *vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;
	uint32_t i;
	uint64_t pool_sel;

	macaddr = &(data->mac_addrs[index]);

	pool_sel = dev->data->mac_pool_sel[index];

	for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
		if (pool_sel & (1ULL << i)) {
			if (i == 0)
				vsi = pf->main_vsi;
			else {
				/* No VMDQ pool enabled or configured */
				if (!(pf->flags & I40E_FLAG_VMDQ) ||
				    (i > pf->nb_cfg_vmdq_vsi)) {
					PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
						    "/configured");
					return;
				}
				vsi = pf->vmdq[i - 1].vsi;
			}
			ret = i40e_vsi_delete_mac(vsi, macaddr);

			if (ret) {
				PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
				return;
			}
		}
	}
}
/* Set perfect match or hash match of MAC and VLAN for a VF */
static int
i40e_vf_mac_filter_set(struct i40e_pf *pf,
		       struct rte_eth_mac_filter *filter,
		       bool add)
{
	struct i40e_hw *hw;
	struct i40e_mac_filter_info mac_filter;
	struct ether_addr old_mac;
	struct ether_addr *new_mac;
	struct i40e_pf_vf *vf = NULL;
	uint16_t vf_id;
	int ret;

	if (pf == NULL) {
		PMD_DRV_LOG(ERR, "Invalid PF argument.");
		return -EINVAL;
	}
	hw = I40E_PF_TO_HW(pf);

	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
		return -EINVAL;
	}

	new_mac = &filter->mac_addr;

	if (is_zero_ether_addr(new_mac)) {
		PMD_DRV_LOG(ERR, "Invalid ethernet address.");
		return -EINVAL;
	}

	vf_id = filter->dst_id;

	if (vf_id > pf->vf_num - 1 || !pf->vfs) {
		PMD_DRV_LOG(ERR, "Invalid argument.");
		return -EINVAL;
	}
	vf = &pf->vfs[vf_id];

	if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
		PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
		return -EINVAL;
	}

	if (add) {
		(void)rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
		(void)rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
				ETHER_ADDR_LEN);
		(void)rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
				ETHER_ADDR_LEN);

		mac_filter.filter_type = filter->filter_type;
		ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
			return -1;
		}
		ether_addr_copy(new_mac, &pf->dev_addr);
	} else {
		(void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
				ETHER_ADDR_LEN);
		ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
			return -1;
		}

		/* Clear device address as it has been removed */
		if (is_same_ether_addr(&(pf->dev_addr), new_mac))
			memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
	}

	return 0;
}
1891 /* MAC filter handle */
1893 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
1896 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1897 struct rte_eth_mac_filter *filter;
1898 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1899 int ret = I40E_NOT_SUPPORTED;
1901 filter = (struct rte_eth_mac_filter *)(arg);
1903 switch (filter_op) {
1904 case RTE_ETH_FILTER_NOP:
1907 case RTE_ETH_FILTER_ADD:
1908 i40e_pf_disable_irq0(hw);
1910 ret = i40e_vf_mac_filter_set(pf, filter, 1);
1911 i40e_pf_enable_irq0(hw);
1913 case RTE_ETH_FILTER_DELETE:
1914 i40e_pf_disable_irq0(hw);
1916 ret = i40e_vf_mac_filter_set(pf, filter, 0);
1917 i40e_pf_enable_irq0(hw);
1920 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
1921 ret = I40E_ERR_PARAM;
1929 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
1930 struct rte_eth_rss_reta_entry64 *reta_conf,
1933 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1934 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1936 uint16_t i, j, lut_size = pf->hash_lut_size;
1937 uint16_t idx, shift;
1940 if (reta_size != lut_size ||
1941 reta_size > ETH_RSS_RETA_SIZE_512) {
1942 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1943 "(%d) doesn't match the size the hardware supports "
1944 "(%d)\n", reta_size, lut_size);
1948 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1949 idx = i / RTE_RETA_GROUP_SIZE;
1950 shift = i % RTE_RETA_GROUP_SIZE;
1951 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
1955 if (mask == I40E_4_BIT_MASK)
1958 l = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
1959 for (j = 0, lut = 0; j < I40E_4_BIT_WIDTH; j++) {
1960 if (mask & (0x1 << j))
1961 lut |= reta_conf[idx].reta[shift + j] <<
1964 lut |= l & (I40E_8_BIT_MASK << (CHAR_BIT * j));
1966 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
1973 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
1974 struct rte_eth_rss_reta_entry64 *reta_conf,
1977 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1978 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1980 uint16_t i, j, lut_size = pf->hash_lut_size;
1981 uint16_t idx, shift;
1984 if (reta_size != lut_size ||
1985 reta_size > ETH_RSS_RETA_SIZE_512) {
1986 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
1987 "(%d) doesn't match the size the hardware supports "
1988 "(%d)\n", reta_size, lut_size);
1992 for (i = 0; i < reta_size; i += I40E_4_BIT_WIDTH) {
1993 idx = i / RTE_RETA_GROUP_SIZE;
1994 shift = i % RTE_RETA_GROUP_SIZE;
1995 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2000 lut = I40E_READ_REG(hw, I40E_PFQF_HLUT(i >> 2));
2001 for (j = 0; j < I40E_4_BIT_WIDTH; j++) {
2002 if (mask & (0x1 << j))
2003 reta_conf[idx].reta[shift + j] = ((lut >>
2004 (CHAR_BIT * j)) & I40E_8_BIT_MASK);
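/*
 * A worked example of the HLUT packing used by the two functions above
 * (illustrative values): each I40E_PFQF_HLUT register holds four
 * one-byte LUT entries, so RETA entries 8..11 live in register
 * I40E_PFQF_HLUT(2) (i >> 2 with i = 8), entry 8 + j occupying bits
 * [8*j .. 8*j + 7]. The 4-bit mask derived from reta_conf[idx].mask
 * selects which of the four bytes are written or read back; unmasked
 * bytes are preserved from the previous register value.
 */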
2012 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
2013 * @hw: pointer to the HW structure
2014 * @mem: pointer to mem struct to fill out
2015 * @size: size of memory requested
2016 * @alignment: what to align the allocation to
2018 enum i40e_status_code
2019 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2020 struct i40e_dma_mem *mem,
2024 static uint64_t id = 0;
2025 const struct rte_memzone *mz = NULL;
2026 char z_name[RTE_MEMZONE_NAMESIZE];
2029 return I40E_ERR_PARAM;
2032 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
2033 #ifdef RTE_LIBRTE_XEN_DOM0
2034 mz = rte_memzone_reserve_bounded(z_name, size, 0, 0, alignment,
2037 mz = rte_memzone_reserve_aligned(z_name, size, 0, 0, alignment);
2040 return I40E_ERR_NO_MEMORY;
2045 #ifdef RTE_LIBRTE_XEN_DOM0
2046 mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
2048 mem->pa = mz->phys_addr;
2051 return I40E_SUCCESS;
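/*
 * Usage sketch for the osdep DMA helpers (illustrative only; the size
 * and alignment below are assumed example values, not requirements of
 * the base driver):
 *
 *     struct i40e_dma_mem mem;
 *     if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) == I40E_SUCCESS) {
 *             ... use mem.va (virtual) and mem.pa (physical) ...
 *             i40e_free_dma_mem_d(hw, &mem);
 *     }
 */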
2055 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
2056 * @hw: pointer to the HW structure
2057 * @mem: pointer to mem struct to free
2059 enum i40e_status_code
2060 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2061 struct i40e_dma_mem *mem)
2063 if (!mem || !mem->va)
2064 return I40E_ERR_PARAM;
2069 return I40E_SUCCESS;
2073 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
2074 * @hw: pointer to the HW structure
2075 * @mem: pointer to mem struct to fill out
2076 * @size: size of memory requested
2078 enum i40e_status_code
2079 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2080 struct i40e_virt_mem *mem,
2084 return I40E_ERR_PARAM;
2087 mem->va = rte_zmalloc("i40e", size, 0);
2090 return I40E_SUCCESS;
2092 return I40E_ERR_NO_MEMORY;
2096 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
2097 * @hw: pointer to the HW structure
2098 * @mem: pointer to mem struct to free
2100 enum i40e_status_code
2101 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
2102 struct i40e_virt_mem *mem)
2105 return I40E_ERR_PARAM;
2110 return I40E_SUCCESS;
2114 i40e_init_spinlock_d(struct i40e_spinlock *sp)
2116 rte_spinlock_init(&sp->spinlock);
2120 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
2122 rte_spinlock_lock(&sp->spinlock);
2126 i40e_release_spinlock_d(struct i40e_spinlock *sp)
2128 rte_spinlock_unlock(&sp->spinlock);
2132 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
2138 * Get the hardware capabilities, which will be parsed
2139 * and saved into struct i40e_hw.
2142 i40e_get_cap(struct i40e_hw *hw)
2144 struct i40e_aqc_list_capabilities_element_resp *buf;
2145 uint16_t len, size = 0;
2148 /* Calculate a buffer size large enough to hold the response data temporarily */
2149 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
2150 I40E_MAX_CAP_ELE_NUM;
2151 buf = rte_zmalloc("i40e", len, 0);
2153 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2154 return I40E_ERR_NO_MEMORY;
2157 /* Get and parse the capabilities, then save them to hw */
2158 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
2159 i40e_aqc_opc_list_func_capabilities, NULL);
2160 if (ret != I40E_SUCCESS)
2161 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
2163 /* Free the temporary buffer after being used */
2170 i40e_pf_parameter_init(struct rte_eth_dev *dev)
2172 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2173 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2174 uint16_t sum_queues = 0, sum_vsis, left_queues;
2176 /* First check whether the FW supports SR-IOV */
2177 if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
2178 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
2182 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
2183 pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
2184 PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
2185 /* Allocate queues for pf */
2186 if (hw->func_caps.rss) {
2187 pf->flags |= I40E_FLAG_RSS;
2188 pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
2189 (uint32_t)(1 << hw->func_caps.rss_table_entry_width));
2190 pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
2193 sum_queues = pf->lan_nb_qps;
2194 /* Default VSI is not counted in */
2196 PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
2198 if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
2199 pf->flags |= I40E_FLAG_SRIOV;
2200 pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
2201 if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
2202 PMD_INIT_LOG(ERR, "Config VF number %u, "
2203 "max supported %u.",
2204 dev->pci_dev->max_vfs,
2205 hw->func_caps.num_vfs);
2208 if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
2209 PMD_INIT_LOG(ERR, "FVL VF queue %u, "
2210 "max support %u queues.",
2211 pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
2214 pf->vf_num = dev->pci_dev->max_vfs;
2215 sum_queues += pf->vf_nb_qps * pf->vf_num;
2216 sum_vsis += pf->vf_num;
2217 PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
2218 pf->vf_num, pf->vf_nb_qps);
2222 if (hw->func_caps.vmdq) {
2223 pf->flags |= I40E_FLAG_VMDQ;
2224 pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2225 pf->max_nb_vmdq_vsi = 1;
2227 * If VMDQ is available, assume a single VSI can be created; adjust later.
2230 sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
2231 sum_vsis += pf->max_nb_vmdq_vsi;
2233 pf->vmdq_nb_qps = 0;
2234 pf->max_nb_vmdq_vsi = 0;
2236 pf->nb_cfg_vmdq_vsi = 0;
2238 if (hw->func_caps.fd) {
2239 pf->flags |= I40E_FLAG_FDIR;
2240 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
2242 * Each flow director consumes one VSI and one queue,
2243 * but the exact number can't be predicted here.
2247 if (sum_vsis > pf->max_num_vsi ||
2248 sum_queues > hw->func_caps.num_rx_qp) {
2249 PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
2250 PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
2251 pf->max_num_vsi, sum_vsis);
2252 PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
2253 hw->func_caps.num_rx_qp, sum_queues);
2257 /* Adjust VMDQ setting to support as many VMs as possible */
2258 if (pf->flags & I40E_FLAG_VMDQ) {
2259 left_queues = hw->func_caps.num_rx_qp - sum_queues;
2261 pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
2262 pf->max_num_vsi - sum_vsis);
2264 /* Limit the max VMDQ number to what rte_ether can support */
2265 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
2268 PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
2269 pf->max_nb_vmdq_vsi);
2270 PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
2273 /* Each VSI occupies at least one MSI-X interrupt, plus IRQ0 for misc intr
2275 if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
2276 PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
2277 sum_vsis, hw->func_caps.num_msix_vectors);
2280 return I40E_SUCCESS;
2284 i40e_pf_get_switch_config(struct i40e_pf *pf)
2286 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2287 struct i40e_aqc_get_switch_config_resp *switch_config;
2288 struct i40e_aqc_switch_config_element_resp *element;
2289 uint16_t start_seid = 0, num_reported;
2292 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
2293 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
2294 if (!switch_config) {
2295 PMD_DRV_LOG(ERR, "Failed to allocate memory");
2299 /* Get the switch configurations */
2300 ret = i40e_aq_get_switch_config(hw, switch_config,
2301 I40E_AQ_LARGE_BUF, &start_seid, NULL);
2302 if (ret != I40E_SUCCESS) {
2303 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
2306 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
2307 if (num_reported != 1) { /* The number should be 1 */
2308 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
2312 /* Parse the switch configuration elements */
2313 element = &(switch_config->element[0]);
2314 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
2315 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
2316 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
2318 PMD_DRV_LOG(INFO, "Unknown element type");
2321 rte_free(switch_config);
2327 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
2330 struct pool_entry *entry;
2332 if (pool == NULL || num == 0)
2335 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
2336 if (entry == NULL) {
2337 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
2341 /* Initialize the queue heap */
2342 pool->num_free = num;
2343 pool->num_alloc = 0;
2345 LIST_INIT(&pool->alloc_list);
2346 LIST_INIT(&pool->free_list);
2348 /* Initialize element */
2352 LIST_INSERT_HEAD(&pool->free_list, entry, next);
2357 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
2359 struct pool_entry *entry;
2364 LIST_FOREACH(entry, &pool->alloc_list, next) {
2365 LIST_REMOVE(entry, next);
2369 LIST_FOREACH(entry, &pool->free_list, next) {
2370 LIST_REMOVE(entry, next);
2375 pool->num_alloc = 0;
2377 LIST_INIT(&pool->alloc_list);
2378 LIST_INIT(&pool->free_list);
2382 i40e_res_pool_free(struct i40e_res_pool_info *pool,
2385 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
2386 uint32_t pool_offset;
2390 PMD_DRV_LOG(ERR, "Invalid parameter");
2394 pool_offset = base - pool->base;
2395 /* Lookup in alloc list */
2396 LIST_FOREACH(entry, &pool->alloc_list, next) {
2397 if (entry->base == pool_offset) {
2398 valid_entry = entry;
2399 LIST_REMOVE(entry, next);
2404 /* Not found, return */
2405 if (valid_entry == NULL) {
2406 PMD_DRV_LOG(ERR, "Failed to find entry");
2411 * Found it; move it to the free list and try to merge.
2412 * To make merging easier, the free list is kept sorted by queue base.
2413 * Find the adjacent prev and next entries.
2416 LIST_FOREACH(entry, &pool->free_list, next) {
2417 if (entry->base > valid_entry->base) {
2425 /* Try to merge with the next one */
2427 /* Merge with the next one */
2428 if (valid_entry->base + valid_entry->len == next->base) {
2429 next->base = valid_entry->base;
2430 next->len += valid_entry->len;
2431 rte_free(valid_entry);
2438 /* Merge with previous one */
2439 if (prev->base + prev->len == valid_entry->base) {
2440 prev->len += valid_entry->len;
2441 /* If it was merged with the next one, remove that node */
2443 LIST_REMOVE(valid_entry, next);
2444 rte_free(valid_entry);
2446 rte_free(valid_entry);
2452 /* No entry to merge with; insert it */
2455 LIST_INSERT_AFTER(prev, valid_entry, next);
2456 else if (next != NULL)
2457 LIST_INSERT_BEFORE(next, valid_entry, next);
2458 else /* It's empty list, insert to head */
2459 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
2462 pool->num_free += valid_entry->len;
2463 pool->num_alloc -= valid_entry->len;
2469 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
2472 struct pool_entry *entry, *valid_entry;
2474 if (pool == NULL || num == 0) {
2475 PMD_DRV_LOG(ERR, "Invalid parameter");
2479 if (pool->num_free < num) {
2480 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
2481 num, pool->num_free);
2486 /* Look up in the free list and find the best-fit entry */
2487 LIST_FOREACH(entry, &pool->free_list, next) {
2488 if (entry->len >= num) {
2490 if (entry->len == num) {
2491 valid_entry = entry;
2494 if (valid_entry == NULL || valid_entry->len > entry->len)
2495 valid_entry = entry;
2499 /* No entry satisfies the request; return */
2500 if (valid_entry == NULL) {
2501 PMD_DRV_LOG(ERR, "No valid entry found");
2505 * The entry has exactly the requested number of queues;
2506 * remove it from the free_list.
2508 if (valid_entry->len == num) {
2509 LIST_REMOVE(valid_entry, next);
2512 * The entry has more queues than requested; create a new
2513 * entry for the alloc_list and shrink the base and length
2514 * of the entry left in the free_list.
2516 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
2517 if (entry == NULL) {
2518 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2522 entry->base = valid_entry->base;
2524 valid_entry->base += num;
2525 valid_entry->len -= num;
2526 valid_entry = entry;
2529 /* Insert it into alloc list, not sorted */
2530 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
2532 pool->num_free -= valid_entry->len;
2533 pool->num_alloc += valid_entry->len;
2535 return (valid_entry->base + pool->base);
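/*
 * Usage sketch for the resource pool API above (a minimal sketch; the
 * base and counts are assumed example values):
 *
 *     struct i40e_res_pool_info pool;
 *     int base;
 *
 *     i40e_res_pool_init(&pool, 0, 64);       64 queues starting at 0
 *     base = i40e_res_pool_alloc(&pool, 16);  best fit, returns 0 here
 *     i40e_res_pool_free(&pool, base);        merged back into free list
 *     i40e_res_pool_destroy(&pool);
 */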
2539 * bitmap_is_subset - Check whether src2 is a subset of src1
2542 bitmap_is_subset(uint8_t src1, uint8_t src2)
2544 return !((src1 ^ src2) & src2);
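/*
 * Worked example (illustrative values): with src1 = 0x7 (TC0-TC2
 * supported) and src2 = 0x5 (TC0 and TC2 requested):
 *     src1 ^ src2          = 0x2
 *     (src1 ^ src2) & src2 = 0x0  -> src2 is a subset, returns true
 * With src2 = 0x9 (TC3 requested but not supported):
 *     (src1 ^ src2) & src2 = 0x8  -> not a subset, returns false
 */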
2548 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2550 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2552 /* If DCB is not supported, only default TC is supported */
2553 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
2554 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
2558 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
2559 PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
2560 "HW support 0x%x", hw->func_caps.enabled_tcmap,
2564 return I40E_SUCCESS;
2568 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
2569 struct i40e_vsi_vlan_pvid_info *info)
2572 struct i40e_vsi_context ctxt;
2573 uint8_t vlan_flags = 0;
2576 if (vsi == NULL || info == NULL) {
2577 PMD_DRV_LOG(ERR, "invalid parameters");
2578 return I40E_ERR_PARAM;
2582 vsi->info.pvid = info->config.pvid;
2584 * If PVID insertion is enabled, only tagged packets are
2585 * allowed to be sent out.
2587 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
2588 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2591 if (info->config.reject.tagged == 0)
2592 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
2594 if (info->config.reject.untagged == 0)
2595 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2597 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
2598 I40E_AQ_VSI_PVLAN_MODE_MASK);
2599 vsi->info.port_vlan_flags |= vlan_flags;
2600 vsi->info.valid_sections =
2601 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
2602 memset(&ctxt, 0, sizeof(ctxt));
2603 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2604 ctxt.seid = vsi->seid;
2606 hw = I40E_VSI_TO_HW(vsi);
2607 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2608 if (ret != I40E_SUCCESS)
2609 PMD_DRV_LOG(ERR, "Failed to update VSI params");
2615 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
2617 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2619 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
2621 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2622 if (ret != I40E_SUCCESS)
2626 PMD_DRV_LOG(ERR, "seid not valid");
2630 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
2631 tc_bw_data.tc_valid_bits = enabled_tcmap;
2632 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2633 tc_bw_data.tc_bw_credits[i] =
2634 (enabled_tcmap & (1 << i)) ? 1 : 0;
2636 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
2637 if (ret != I40E_SUCCESS) {
2638 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
2642 (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
2643 sizeof(vsi->info.qs_handle));
2644 return I40E_SUCCESS;
2648 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
2649 struct i40e_aqc_vsi_properties_data *info,
2650 uint8_t enabled_tcmap)
2652 int ret, total_tc = 0, i;
2653 uint16_t qpnum_per_tc, bsf, qp_idx;
2655 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
2656 if (ret != I40E_SUCCESS)
2659 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
2660 if (enabled_tcmap & (1 << i))
2662 vsi->enabled_tc = enabled_tcmap;
2664 /* Number of queues per enabled TC */
2665 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
2666 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
2667 bsf = rte_bsf32(qpnum_per_tc);
2669 /* Adjust the queue count to the number of queues actually usable */
2670 vsi->nb_qps = qpnum_per_tc * total_tc;
2673 * Configure the TC and queue mapping parameters. Each enabled
2674 * TC is allocated qpnum_per_tc queues; traffic of a disabled
2675 * TC is served by the default queue.
2678 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2679 if (vsi->enabled_tc & (1 << i)) {
2680 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
2681 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2682 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2683 qp_idx += qpnum_per_tc;
2685 info->tc_mapping[i] = 0;
2688 /* Associate queue number with VSI */
2689 if (vsi->type == I40E_VSI_SRIOV) {
2690 info->mapping_flags |=
2691 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
2692 for (i = 0; i < vsi->nb_qps; i++)
2693 info->queue_mapping[i] =
2694 rte_cpu_to_le_16(vsi->base_queue + i);
2696 info->mapping_flags |=
2697 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2698 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2700 info->valid_sections |=
2701 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2703 return I40E_SUCCESS;
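/*
 * Worked example for the mapping above (illustrative values): with
 * vsi->nb_qps = 16 and enabled_tcmap = 0x3 (TC0 and TC1):
 *     total_tc     = 2
 *     qpnum_per_tc = i40e_align_floor(16 / 2) = 8
 *     bsf          = rte_bsf32(8) = 3      (log2 of the queue count)
 *     tc_mapping[0] = (0 << QUE_OFFSET_SHIFT) | (3 << QUE_NUMBER_SHIFT)
 *     tc_mapping[1] = (8 << QUE_OFFSET_SHIFT) | (3 << QUE_NUMBER_SHIFT)
 * i.e. TC0 owns queues 0-7 and TC1 owns queues 8-15 of the VSI.
 */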
2707 i40e_veb_release(struct i40e_veb *veb)
2709 struct i40e_vsi *vsi;
2712 if (veb == NULL || veb->associate_vsi == NULL)
2715 if (!TAILQ_EMPTY(&veb->head)) {
2716 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
2720 vsi = veb->associate_vsi;
2721 hw = I40E_VSI_TO_HW(vsi);
2723 vsi->uplink_seid = veb->uplink_seid;
2724 i40e_aq_delete_element(hw, veb->seid, NULL);
2727 return I40E_SUCCESS;
2731 static struct i40e_veb *
2732 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
2734 struct i40e_veb *veb;
2738 if (NULL == pf || vsi == NULL) {
2739 PMD_DRV_LOG(ERR, "veb setup failed, "
2740 "associated VSI shouldn't be NULL");
2743 hw = I40E_PF_TO_HW(pf);
2745 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
2747 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
2751 veb->associate_vsi = vsi;
2752 TAILQ_INIT(&veb->head);
2753 veb->uplink_seid = vsi->uplink_seid;
2755 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
2756 I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
2758 if (ret != I40E_SUCCESS) {
2759 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
2760 hw->aq.asq_last_status);
2764 /* get statistics index */
2765 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
2766 &veb->stats_idx, NULL, NULL, NULL);
2767 if (ret != I40E_SUCCESS) {
2768 PMD_DRV_LOG(ERR, "Get VEB statistics index failed, aq_err: %d",
2769 hw->aq.asq_last_status);
2773 /* Get VEB bandwidth, to be implemented */
2774 /* The associated VSI now binds to the VEB; set its uplink to this VEB */
2775 vsi->uplink_seid = veb->seid;
2784 i40e_vsi_release(struct i40e_vsi *vsi)
2788 struct i40e_vsi_list *vsi_list;
2790 struct i40e_mac_filter *f;
2793 return I40E_SUCCESS;
2795 pf = I40E_VSI_TO_PF(vsi);
2796 hw = I40E_VSI_TO_HW(vsi);
2798 /* The VSI has children attached; release the children first */
2800 TAILQ_FOREACH(vsi_list, &vsi->veb->head, list) {
2801 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
2803 TAILQ_REMOVE(&vsi->veb->head, vsi_list, list);
2805 i40e_veb_release(vsi->veb);
2808 /* Remove all macvlan filters of the VSI */
2809 i40e_vsi_remove_all_macvlan_filter(vsi);
2810 TAILQ_FOREACH(f, &vsi->mac_list, next)
2813 if (vsi->type != I40E_VSI_MAIN) {
2814 /* Remove vsi from parent's sibling list */
2815 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
2816 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
2817 return I40E_ERR_PARAM;
2819 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
2820 &vsi->sib_vsi_list, list);
2822 /* Remove the switch element of the VSI */
2823 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
2824 if (ret != I40E_SUCCESS)
2825 PMD_DRV_LOG(ERR, "Failed to delete element");
2827 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
2829 if (vsi->type != I40E_VSI_SRIOV)
2830 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
2833 return I40E_SUCCESS;
2837 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
2839 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2840 struct i40e_aqc_remove_macvlan_element_data def_filter;
2841 struct i40e_mac_filter_info filter;
2844 if (vsi->type != I40E_VSI_MAIN)
2845 return I40E_ERR_CONFIG;
2846 memset(&def_filter, 0, sizeof(def_filter));
2847 (void)rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
2849 def_filter.vlan_tag = 0;
2850 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
2851 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2852 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
2853 if (ret != I40E_SUCCESS) {
2854 struct i40e_mac_filter *f;
2855 struct ether_addr *mac;
2857 PMD_DRV_LOG(WARNING, "Cannot remove the default "
2859 /* Need to add the permanent MAC to the MAC list */
2860 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
2862 PMD_DRV_LOG(ERR, "failed to allocate memory");
2863 return I40E_ERR_NO_MEMORY;
2865 mac = &f->mac_info.mac_addr;
2866 (void)rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
2868 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2869 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
2874 (void)rte_memcpy(&filter.mac_addr,
2875 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
2876 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2877 return i40e_vsi_add_mac(vsi, &filter);
2881 i40e_vsi_dump_bw_config(struct i40e_vsi *vsi)
2883 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
2884 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
2885 struct i40e_hw *hw = &vsi->adapter->hw;
2889 memset(&bw_config, 0, sizeof(bw_config));
2890 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
2891 if (ret != I40E_SUCCESS) {
2892 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
2893 hw->aq.asq_last_status);
2897 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
2898 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
2899 &ets_sla_config, NULL);
2900 if (ret != I40E_SUCCESS) {
2901 PMD_DRV_LOG(ERR, "VSI failed to get TC bandwidth "
2902 "configuration %u", hw->aq.asq_last_status);
2906 /* Don't store the info yet, just print it out */
2907 PMD_DRV_LOG(INFO, "VSI bw limit:%u", bw_config.port_bw_limit);
2908 PMD_DRV_LOG(INFO, "VSI max_bw:%u", bw_config.max_bw);
2909 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
2910 PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u", i,
2911 ets_sla_config.share_credits[i]);
2912 PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u", i,
2913 rte_le_to_cpu_16(ets_sla_config.credits[i]));
2914 PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
2915 rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
2924 i40e_vsi_setup(struct i40e_pf *pf,
2925 enum i40e_vsi_type type,
2926 struct i40e_vsi *uplink_vsi,
2927 uint16_t user_param)
2929 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2930 struct i40e_vsi *vsi;
2931 struct i40e_mac_filter_info filter;
2933 struct i40e_vsi_context ctxt;
2934 struct ether_addr broadcast =
2935 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
2937 if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
2938 PMD_DRV_LOG(ERR, "VSI setup failed, "
2939 "VSI link shouldn't be NULL");
2943 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
2944 PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
2945 "uplink VSI should be NULL");
2949 /* If uplink vsi didn't setup VEB, create one first */
2950 if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) {
2951 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
2953 if (NULL == uplink_vsi->veb) {
2954 PMD_DRV_LOG(ERR, "VEB setup failed");
2959 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
2961 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
2964 TAILQ_INIT(&vsi->mac_list);
2966 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
2967 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
2968 vsi->parent_vsi = uplink_vsi;
2969 vsi->user_param = user_param;
2970 /* Allocate queues */
2971 switch (vsi->type) {
2972 case I40E_VSI_MAIN :
2973 vsi->nb_qps = pf->lan_nb_qps;
2975 case I40E_VSI_SRIOV :
2976 vsi->nb_qps = pf->vf_nb_qps;
2978 case I40E_VSI_VMDQ2:
2979 vsi->nb_qps = pf->vmdq_nb_qps;
2982 vsi->nb_qps = pf->fdir_nb_qps;
2988 * The filter status descriptor is reported on rx queue 0,
2989 * while the tx queue for fdir filter programming has no
2990 * such constraint and could be a non-zero queue.
2991 * To keep it simple, the FDIR VSI uses queue pair 0.
2992 * To make sure queue pair 0 is used, the queue allocation
2993 * needs to be done before this function is called
2995 if (type != I40E_VSI_FDIR) {
2996 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
2998 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
3002 vsi->base_queue = ret;
3004 vsi->base_queue = I40E_FDIR_QUEUE_ID;
3006 /* VF has MSIX interrupt in VF range, don't allocate here */
3007 if (type != I40E_VSI_SRIOV) {
3008 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
3010 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
3011 goto fail_queue_alloc;
3013 vsi->msix_intr = ret;
3017 if (type == I40E_VSI_MAIN) {
3018 /* For the main VSI, no need to add it since it's the default one */
3019 vsi->uplink_seid = pf->mac_seid;
3020 vsi->seid = pf->main_vsi_seid;
3021 /* Bind queues with specific MSIX interrupt */
3023 * At least two interrupts are needed: one for misc causes,
3024 * which is enabled from the OS side, and another for the
3025 * queues, bound to the interrupt from the device side only.
3028 /* Get default VSI parameters from hardware */
3029 memset(&ctxt, 0, sizeof(ctxt));
3030 ctxt.seid = vsi->seid;
3031 ctxt.pf_num = hw->pf_id;
3032 ctxt.uplink_seid = vsi->uplink_seid;
3034 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
3035 if (ret != I40E_SUCCESS) {
3036 PMD_DRV_LOG(ERR, "Failed to get VSI params");
3037 goto fail_msix_alloc;
3039 (void)rte_memcpy(&vsi->info, &ctxt.info,
3040 sizeof(struct i40e_aqc_vsi_properties_data));
3041 vsi->vsi_id = ctxt.vsi_number;
3042 vsi->info.valid_sections = 0;
3044 /* Configure TCs; enable TC0 only */
3045 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
3047 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
3048 goto fail_msix_alloc;
3051 /* TC, queue mapping */
3052 memset(&ctxt, 0, sizeof(ctxt));
3053 vsi->info.valid_sections |=
3054 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3055 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3056 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3057 (void)rte_memcpy(&ctxt.info, &vsi->info,
3058 sizeof(struct i40e_aqc_vsi_properties_data));
3059 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3060 I40E_DEFAULT_TCMAP);
3061 if (ret != I40E_SUCCESS) {
3062 PMD_DRV_LOG(ERR, "Failed to configure "
3063 "TC queue mapping");
3064 goto fail_msix_alloc;
3066 ctxt.seid = vsi->seid;
3067 ctxt.pf_num = hw->pf_id;
3068 ctxt.uplink_seid = vsi->uplink_seid;
3071 /* Update VSI parameters */
3072 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3073 if (ret != I40E_SUCCESS) {
3074 PMD_DRV_LOG(ERR, "Failed to update VSI params");
3075 goto fail_msix_alloc;
3078 (void)rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
3079 sizeof(vsi->info.tc_mapping));
3080 (void)rte_memcpy(&vsi->info.queue_mapping,
3081 &ctxt.info.queue_mapping,
3082 sizeof(vsi->info.queue_mapping));
3083 vsi->info.mapping_flags = ctxt.info.mapping_flags;
3084 vsi->info.valid_sections = 0;
3086 (void)rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
3090 * Updating the default filter settings is necessary to prevent
3091 * reception of tagged packets.
3092 * Some old firmware configurations load a default macvlan
3093 * filter which accepts both tagged and untagged packets.
3094 * The update replaces it with a normal filter if needed.
3095 * For NVM 4.2.2 or later, the update is not needed anymore.
3096 * Firmware with the correct configuration loads the expected
3097 * default macvlan filter, which cannot be removed.
3099 i40e_update_default_filter_setting(vsi);
3100 i40e_config_qinq(hw, vsi);
3101 } else if (type == I40E_VSI_SRIOV) {
3102 memset(&ctxt, 0, sizeof(ctxt));
3104 * For other VSIs, the uplink_seid equals the uplink VSI's
3105 * uplink_seid since they share the same VEB
3107 vsi->uplink_seid = uplink_vsi->uplink_seid;
3108 ctxt.pf_num = hw->pf_id;
3109 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
3110 ctxt.uplink_seid = vsi->uplink_seid;
3111 ctxt.connection_type = 0x1;
3112 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
3115 * Do not configure switch ID to enable VEB switch by
3116 * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. Because in Fortville,
3117 * if the source MAC address of a packet sent from a VF is not
3118 * listed in the VEB's MAC table, the VEB will switch the
3119 * packet back to the VF. Enable it once the HW issue is fixed.
3123 /* Configure port/vlan */
3124 ctxt.info.valid_sections |=
3125 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3126 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3127 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3128 I40E_DEFAULT_TCMAP);
3129 if (ret != I40E_SUCCESS) {
3130 PMD_DRV_LOG(ERR, "Failed to configure "
3131 "TC queue mapping");
3132 goto fail_msix_alloc;
3134 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3135 ctxt.info.valid_sections |=
3136 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3138 * Since the VSI is not created yet, only configure the
3139 * parameters; the VSI is added below.
3142 i40e_config_qinq(hw, vsi);
3143 } else if (type == I40E_VSI_VMDQ2) {
3144 memset(&ctxt, 0, sizeof(ctxt));
3146 * For other VSIs, the uplink_seid equals the uplink VSI's
3147 * uplink_seid since they share the same VEB
3149 vsi->uplink_seid = uplink_vsi->uplink_seid;
3150 ctxt.pf_num = hw->pf_id;
3152 ctxt.uplink_seid = vsi->uplink_seid;
3153 ctxt.connection_type = 0x1;
3154 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
3156 ctxt.info.valid_sections |=
3157 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
3158 /* user_param carries the flag to enable loopback */
3160 ctxt.info.switch_id =
3161 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
3162 ctxt.info.switch_id |=
3163 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
3166 /* Configure port/vlan */
3167 ctxt.info.valid_sections |=
3168 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3169 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
3170 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3171 I40E_DEFAULT_TCMAP);
3172 if (ret != I40E_SUCCESS) {
3173 PMD_DRV_LOG(ERR, "Failed to configure "
3174 "TC queue mapping");
3175 goto fail_msix_alloc;
3177 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3178 ctxt.info.valid_sections |=
3179 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3180 } else if (type == I40E_VSI_FDIR) {
3181 memset(&ctxt, 0, sizeof(ctxt));
3182 vsi->uplink_seid = uplink_vsi->uplink_seid;
3183 ctxt.pf_num = hw->pf_id;
3185 ctxt.uplink_seid = vsi->uplink_seid;
3186 ctxt.connection_type = 0x1; /* regular data port */
3187 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
3188 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
3189 I40E_DEFAULT_TCMAP);
3190 if (ret != I40E_SUCCESS) {
3191 PMD_DRV_LOG(ERR, "Failed to configure "
3192 "TC queue mapping.");
3193 goto fail_msix_alloc;
3195 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
3196 ctxt.info.valid_sections |=
3197 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
3199 PMD_DRV_LOG(ERR, "VSI: other VSI types are not supported yet");
3200 goto fail_msix_alloc;
3203 if (vsi->type != I40E_VSI_MAIN) {
3204 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
3205 if (ret != I40E_SUCCESS) {
3206 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
3207 hw->aq.asq_last_status);
3208 goto fail_msix_alloc;
3210 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
3211 vsi->info.valid_sections = 0;
3212 vsi->seid = ctxt.seid;
3213 vsi->vsi_id = ctxt.vsi_number;
3214 vsi->sib_vsi_list.vsi = vsi;
3215 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
3216 &vsi->sib_vsi_list, list);
3219 /* MAC/VLAN configuration */
3220 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
3221 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3223 ret = i40e_vsi_add_mac(vsi, &filter);
3224 if (ret != I40E_SUCCESS) {
3225 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3226 goto fail_msix_alloc;
3229 /* Get VSI BW information */
3230 i40e_vsi_dump_bw_config(vsi);
3233 i40e_res_pool_free(&pf->msix_pool,vsi->msix_intr);
3235 i40e_res_pool_free(&pf->qp_pool,vsi->base_queue);
3241 /* Configure vlan stripping on or off */
3243 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
3245 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3246 struct i40e_vsi_context ctxt;
3248 int ret = I40E_SUCCESS;
3250 /* Check if it has been already on or off */
3251 if (vsi->info.valid_sections &
3252 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
3254 if ((vsi->info.port_vlan_flags &
3255 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
3256 return 0; /* already on */
3258 if ((vsi->info.port_vlan_flags &
3259 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3260 I40E_AQ_VSI_PVLAN_EMOD_MASK)
3261 return 0; /* already off */
3266 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3268 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3269 vsi->info.valid_sections =
3270 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
3271 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
3272 vsi->info.port_vlan_flags |= vlan_flags;
3273 ctxt.seid = vsi->seid;
3274 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3275 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3277 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
3278 on ? "enable" : "disable");
3284 i40e_dev_init_vlan(struct rte_eth_dev *dev)
3286 struct rte_eth_dev_data *data = dev->data;
3289 /* Apply vlan offload setting */
3290 i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
3292 /* Apply double-vlan setting, not implemented yet */
3294 /* Apply pvid setting */
3295 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
3296 data->dev_conf.txmode.hw_vlan_insert_pvid);
3298 PMD_DRV_LOG(INFO, "Failed to update VSI params");
3304 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
3306 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
3308 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
3312 i40e_update_flow_control(struct i40e_hw *hw)
3314 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
3315 struct i40e_link_status link_status;
3316 uint32_t rxfc = 0, txfc = 0, reg;
3320 memset(&link_status, 0, sizeof(link_status));
3321 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
3322 if (ret != I40E_SUCCESS) {
3323 PMD_DRV_LOG(ERR, "Failed to get link status information");
3324 goto write_reg; /* Disable flow control */
3327 an_info = hw->phy.link_info.an_info;
3328 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
3329 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
3330 ret = I40E_ERR_NOT_READY;
3331 goto write_reg; /* Disable flow control */
3334 * If link auto negotiation is enabled, flow control needs to
3335 * be configured according to it
3337 switch (an_info & I40E_LINK_PAUSE_RXTX) {
3338 case I40E_LINK_PAUSE_RXTX:
3341 hw->fc.current_mode = I40E_FC_FULL;
3343 case I40E_AQ_LINK_PAUSE_RX:
3345 hw->fc.current_mode = I40E_FC_RX_PAUSE;
3347 case I40E_AQ_LINK_PAUSE_TX:
3349 hw->fc.current_mode = I40E_FC_TX_PAUSE;
3352 hw->fc.current_mode = I40E_FC_NONE;
3357 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
3358 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
3359 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3360 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
3361 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
3362 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
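/*
 * Summary of the negotiated-pause to flow-control mapping implemented
 * above (rxfc/txfc feed the register writes; inferred from the switch
 * cases):
 *     RX and TX pause -> I40E_FC_FULL      (rxfc = 1, txfc = 1)
 *     RX pause only   -> I40E_FC_RX_PAUSE  (rxfc = 1)
 *     TX pause only   -> I40E_FC_TX_PAUSE  (txfc = 1)
 *     neither         -> I40E_FC_NONE      (both 0, flow control off)
 */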
3369 i40e_pf_setup(struct i40e_pf *pf)
3371 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3372 struct i40e_filter_control_settings settings;
3373 struct i40e_vsi *vsi;
3376 /* Clear all stats counters */
3377 pf->offset_loaded = FALSE;
3378 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
3379 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
3381 ret = i40e_pf_get_switch_config(pf);
3382 if (ret != I40E_SUCCESS) {
3383 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
3386 if (pf->flags & I40E_FLAG_FDIR) {
3387 /* allocate the queue first so FDIR uses queue pair 0 */
3388 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
3389 if (ret != I40E_FDIR_QUEUE_ID) {
3390 PMD_DRV_LOG(ERR, "queue allocation failed for FDIR:"
3392 pf->flags &= ~I40E_FLAG_FDIR;
3395 /* main VSI setup */
3396 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
3398 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
3399 return I40E_ERR_NOT_READY;
3403 /* Configure filter control */
3404 memset(&settings, 0, sizeof(settings));
3405 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
3406 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
3407 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
3408 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
3410 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
3411 hw->func_caps.rss_table_size);
3412 return I40E_ERR_PARAM;
3414 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
3415 "size: %u\n", hw->func_caps.rss_table_size);
3416 pf->hash_lut_size = hw->func_caps.rss_table_size;
3418 /* Enable ethtype and macvlan filters */
3419 settings.enable_ethtype = TRUE;
3420 settings.enable_macvlan = TRUE;
3421 ret = i40e_set_filter_control(hw, &settings);
3423 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
3426 /* Update flow control according to the auto negotiation */
3427 i40e_update_flow_control(hw);
3429 return I40E_SUCCESS;
3433 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3439 * Set or clear the TX Queue Disable flags,
3440 * as required by the hardware.
3442 i40e_pre_tx_queue_cfg(hw, q_idx, on);
3443 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
3445 /* Wait until the request is finished */
3446 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3447 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3448 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3449 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3450 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
3456 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3457 return I40E_SUCCESS; /* already on, skip next steps */
3459 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
3460 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3462 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3463 return I40E_SUCCESS; /* already off, skip next steps */
3464 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3466 /* Write the register */
3467 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
3468 /* Check the result */
3469 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3470 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3471 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
3473 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3474 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
3477 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
3478 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
3482 /* Check whether the operation timed out */
3483 if (j >= I40E_CHK_Q_ENA_COUNT) {
3484 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
3485 (on ? "enable" : "disable"), q_idx);
3486 return I40E_ERR_TIMEOUT;
3489 return I40E_SUCCESS;
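/*
 * Note on the QENA handshake used above (a sketch of the protocol as
 * the code exercises it): hardware mirrors the REQ bit into the STAT
 * bit once the queue state change completes, so
 *     enable:  set QENA_REQ, poll until QENA_STAT reads 1
 *     disable: clear QENA_REQ, poll until QENA_STAT reads 0
 * and the first polling loop waits for any previous request to settle
 * (REQ == STAT) before a new one is issued.
 */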
3492 /* Switch on or off the tx queues */
3494 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
3496 struct rte_eth_dev_data *dev_data = pf->dev_data;
3497 struct i40e_tx_queue *txq;
3498 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3502 for (i = 0; i < dev_data->nb_tx_queues; i++) {
3503 txq = dev_data->tx_queues[i];
3504 /* Don't operate the queue if it is not configured, or
3505 * if it is to be started only per-queue (deferred start) */
3506 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
3509 ret = i40e_dev_tx_queue_start(dev, i);
3511 ret = i40e_dev_tx_queue_stop(dev, i);
3512 if (ret != I40E_SUCCESS)
3516 return I40E_SUCCESS;
3520 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
3525 /* Wait until the request is finished */
3526 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3527 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3528 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3529 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
3530 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
3535 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
3536 return I40E_SUCCESS; /* Already on, skip next steps */
3537 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3539 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3540 return I40E_SUCCESS; /* Already off, skip next steps */
3541 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3544 /* Write the register */
3545 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
3546 /* Check the result */
3547 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
3548 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
3549 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
3551 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3552 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
3555 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
3556 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
3561 /* Check whether the operation timed out */
3562 if (j >= I40E_CHK_Q_ENA_COUNT) {
3563 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
3564 (on ? "enable" : "disable"), q_idx);
3565 return I40E_ERR_TIMEOUT;
3568 return I40E_SUCCESS;
3570 /* Switch on or off the rx queues */
3572 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
3574 struct rte_eth_dev_data *dev_data = pf->dev_data;
3575 struct i40e_rx_queue *rxq;
3576 struct rte_eth_dev *dev = pf->adapter->eth_dev;
3580 for (i = 0; i < dev_data->nb_rx_queues; i++) {
3581 rxq = dev_data->rx_queues[i];
3582 /* Don't operate the queue if it is not configured, or
3583 * if it is to be started only per-queue (deferred start) */
3584 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
3587 ret = i40e_dev_rx_queue_start(dev, i);
3589 ret = i40e_dev_rx_queue_stop(dev, i);
3590 if (ret != I40E_SUCCESS)
3594 return I40E_SUCCESS;
3597 /* Switch on or off all the rx/tx queues */
3599 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
3604 /* enable rx queues before enabling tx queues */
3605 ret = i40e_dev_switch_rx_queues(pf, on);
3607 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
3610 ret = i40e_dev_switch_tx_queues(pf, on);
3612 /* Stop tx queues before stopping rx queues */
3613 ret = i40e_dev_switch_tx_queues(pf, on);
3615 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
3618 ret = i40e_dev_switch_rx_queues(pf, on);
3624 /* Initialize VSI for TX */
3626 i40e_dev_tx_init(struct i40e_pf *pf)
3628 struct rte_eth_dev_data *data = pf->dev_data;
3630 uint32_t ret = I40E_SUCCESS;
3631 struct i40e_tx_queue *txq;
3633 for (i = 0; i < data->nb_tx_queues; i++) {
3634 txq = data->tx_queues[i];
3635 if (!txq || !txq->q_set)
3637 ret = i40e_tx_queue_init(txq);
3638 if (ret != I40E_SUCCESS)
3645 /* Initialize VSI for RX */
3647 i40e_dev_rx_init(struct i40e_pf *pf)
3649 struct rte_eth_dev_data *data = pf->dev_data;
3650 int ret = I40E_SUCCESS;
3652 struct i40e_rx_queue *rxq;
3654 i40e_pf_config_mq_rx(pf);
3655 for (i = 0; i < data->nb_rx_queues; i++) {
3656 rxq = data->rx_queues[i];
3657 if (!rxq || !rxq->q_set)
3660 ret = i40e_rx_queue_init(rxq);
3661 if (ret != I40E_SUCCESS) {
3662 PMD_DRV_LOG(ERR, "Failed to do RX queue "
3672 i40e_dev_rxtx_init(struct i40e_pf *pf)
3676 err = i40e_dev_tx_init(pf);
3678 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
3681 err = i40e_dev_rx_init(pf);
3683 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
3691 i40e_vmdq_setup(struct rte_eth_dev *dev)
3693 struct rte_eth_conf *conf = &dev->data->dev_conf;
3694 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3695 int i, err, conf_vsis, j, loop;
3696 struct i40e_vsi *vsi;
3697 struct i40e_vmdq_info *vmdq_info;
3698 struct rte_eth_vmdq_rx_conf *vmdq_conf;
3699 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3702 * Disable interrupts to avoid messages from VFs; this also
3703 * avoids race conditions during VSI creation/destruction.
3705 i40e_pf_disable_irq0(hw);
3707 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
3708 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
3712 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
3713 if (conf_vsis > pf->max_nb_vmdq_vsi) {
3714 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
3715 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
3716 pf->max_nb_vmdq_vsi);
3720 if (pf->vmdq != NULL) {
3721 PMD_INIT_LOG(INFO, "VMDQ already configured");
3725 pf->vmdq = rte_zmalloc("vmdq_info_struct",
3726 sizeof(*vmdq_info) * conf_vsis, 0);
3728 if (pf->vmdq == NULL) {
3729 PMD_INIT_LOG(ERR, "Failed to allocate memory");
3733 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
3735 /* Create VMDQ VSI */
3736 for (i = 0; i < conf_vsis; i++) {
3737 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
3738 vmdq_conf->enable_loop_back);
3740 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
3744 vmdq_info = &pf->vmdq[i];
3746 vmdq_info->vsi = vsi;
3748 pf->nb_cfg_vmdq_vsi = conf_vsis;
3750 /* Configure Vlan */
3751 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
3752 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
3753 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
3754 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
3755 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
3756 vmdq_conf->pool_map[i].vlan_id, j);
3758 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
3759 vmdq_conf->pool_map[i].vlan_id);
3761 PMD_INIT_LOG(ERR, "Failed to add vlan");
3769 i40e_pf_enable_irq0(hw);
3774 for (i = 0; i < conf_vsis; i++)
3775 if (pf->vmdq[i].vsi == NULL)
3778 i40e_vsi_release(pf->vmdq[i].vsi);
3782 i40e_pf_enable_irq0(hw);
3787 i40e_stat_update_32(struct i40e_hw *hw,
3795 new_data = (uint64_t)I40E_READ_REG(hw, reg);
3799 if (new_data >= *offset)
3800 *stat = (uint64_t)(new_data - *offset);
3802 *stat = (uint64_t)((new_data +
3803 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
3807 i40e_stat_update_48(struct i40e_hw *hw,
3816 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
3817 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
3818 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
3823 if (new_data >= *offset)
3824 *stat = new_data - *offset;
3826 *stat = (uint64_t)((new_data +
3827 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
3829 *stat &= I40E_48_BIT_MASK;
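/*
 * Worked example of the wraparound handling above (illustrative
 * values): for the 32-bit case with *offset = 0xFFFFFFF0 and a new
 * raw reading of 0x00000010, the counter wrapped, so
 *     *stat = (0x10 + (1ULL << 32)) - 0xFFFFFFF0 = 0x20
 * The 48-bit variant applies the same correction with 1ULL << 48 and
 * masks the result back to 48 bits.
 */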
3834 i40e_pf_disable_irq0(struct i40e_hw *hw)
3836 /* Disable all interrupt types */
3837 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0);
3838 I40E_WRITE_FLUSH(hw);
3843 i40e_pf_enable_irq0(struct i40e_hw *hw)
3845 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
3846 I40E_PFINT_DYN_CTL0_INTENA_MASK |
3847 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3848 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
3849 I40E_WRITE_FLUSH(hw);
3853 i40e_pf_config_irq0(struct i40e_hw *hw)
3855 /* read pending request and disable first */
3856 i40e_pf_disable_irq0(hw);
3857 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
3858 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
3859 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
3861 /* Link no queues with irq0 */
3862 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
3863 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
3867 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
3869 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3870 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3873 uint32_t index, offset, val;
3878 * Try to find which VF triggered a reset; use the absolute VF id
3879 * to access, since the register is a global one.
3881 for (i = 0; i < pf->vf_num; i++) {
3882 abs_vf_id = hw->func_caps.vf_base_id + i;
3883 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
3884 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
3885 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
3886 /* A VFR event occurred */
3887 if (val & (0x1 << offset)) {
3890 /* Clear the event first */
3891 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
3893 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
3895 * Only notify that a VF reset event occurred;
3896 * don't trigger another SW reset
3898 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
3899 if (ret != I40E_SUCCESS)
3900 PMD_DRV_LOG(ERR, "Failed to do VF reset");
3906 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
3908 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3909 struct i40e_arq_event_info info;
3910 uint16_t pending, opcode;
3913 info.buf_len = I40E_AQ_BUF_SZ;
3914 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
3915 if (!info.msg_buf) {
3916 PMD_DRV_LOG(ERR, "Failed to allocate mem");
3922 ret = i40e_clean_arq_element(hw, &info, &pending);
3924 if (ret != I40E_SUCCESS) {
3925 PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
3926 "aq_err: %u", hw->aq.asq_last_status);
3929 opcode = rte_le_to_cpu_16(info.desc.opcode);
3932 case i40e_aqc_opc_send_msg_to_pf:
3933 /* Refer to i40e_aq_send_msg_to_pf() for the argument layout */
3934 i40e_pf_host_handle_vf_msg(dev,
3935 rte_le_to_cpu_16(info.desc.retval),
3936 rte_le_to_cpu_32(info.desc.cookie_high),
3937 rte_le_to_cpu_32(info.desc.cookie_low),
3942 PMD_DRV_LOG(ERR, "Request %u is not supported yet",
3947 rte_free(info.msg_buf);
3951 * This handler is registered as the alarm callback to handle the LSC
3952 * interrupt after a fixed delay, in order to wait for the NIC to reach
3953 * a stable state. Currently i40e waits 1 second for the link up
3954 * interrupt; no wait is needed for the link down interrupt.
3957 i40e_dev_interrupt_delayed_handler(void *param)
3959 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
3960 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3963 /* read interrupt causes again */
3964 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
3966 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
3967 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
3968 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
3969 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
3970 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
3971 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
3972 PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
3973 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
3974 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated\n");
3975 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
3976 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
3978 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
3979 PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
3980 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
3981 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
3982 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
3984 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3985 PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
3986 i40e_dev_handle_vfr_event(dev);
3988 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3989 PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
3990 i40e_dev_handle_aq_msg(dev);
3993 /* handle the link up interrupt in an alarm callback */
3994 i40e_dev_link_update(dev, 0);
3995 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
3997 i40e_pf_enable_irq0(hw);
3998 rte_intr_enable(&(dev->pci_dev->intr_handle));
4002 * Interrupt handler triggered by NIC for handling
4003 * specific interrupt.
4006 * Pointer to interrupt handle.
4008 * The address of the parameter (struct rte_eth_dev *) registered before.
4014 i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
4017 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4018 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4021 /* Disable interrupt */
4022 i40e_pf_disable_irq0(hw);
4024 /* read out interrupt causes */
4025 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
4027 /* No interrupt event indicated */
4028 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
4029 PMD_DRV_LOG(INFO, "No interrupt event");
4032 #ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
4033 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
4034 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
4035 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
4036 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
4037 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
4038 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
4039 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
4040 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
4041 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
4042 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
4043 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
4044 PMD_DRV_LOG(ERR, "ICR0: HMC error");
4045 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
4046 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
4047 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
4049 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
4050 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
4051 i40e_dev_handle_vfr_event(dev);
4053 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
4054 PMD_DRV_LOG(INFO, "ICR0: adminq event");
4055 i40e_dev_handle_aq_msg(dev);
4058 /* Link Status Change interrupt */
4059 if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
4060 #define I40E_US_PER_SECOND 1000000
4061 struct rte_eth_link link;
4063 PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
4064 memset(&link, 0, sizeof(link));
4065 rte_i40e_dev_atomic_read_link_status(dev, &link);
4066 i40e_dev_link_update(dev, 0);
4069 * For a link up interrupt, wait 1 second to let the
4070 * hardware reach a stable state; otherwise several
4071 * consecutive interrupts can be observed.
4072 * For a link down interrupt, there is no need to wait.
4074 if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
4075 i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
4078 _rte_eth_dev_callback_process(dev,
4079 RTE_ETH_EVENT_INTR_LSC);
4083 /* Enable interrupt */
4084 i40e_pf_enable_irq0(hw);
4085 rte_intr_enable(&(dev->pci_dev->intr_handle));
4089 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
4090 struct i40e_macvlan_filter *filter,
4093 int ele_num, ele_buff_size;
4094 int num, actual_num, i;
4096 int ret = I40E_SUCCESS;
4097 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4098 struct i40e_aqc_add_macvlan_element_data *req_list;
4100 if (filter == NULL || total == 0)
4101 return I40E_ERR_PARAM;
4102 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
4103 ele_buff_size = hw->aq.asq_buf_size;
4105 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
4106 if (req_list == NULL) {
4107 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4108 return I40E_ERR_NO_MEMORY;
4113 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
4114 memset(req_list, 0, ele_buff_size);
4116 for (i = 0; i < actual_num; i++) {
4117 (void)rte_memcpy(req_list[i].mac_addr,
4118 &filter[num + i].macaddr, ETH_ADDR_LEN);
4119 req_list[i].vlan_tag =
4120 rte_cpu_to_le_16(filter[num + i].vlan_id);
4122 switch (filter[num + i].filter_type) {
4123 case RTE_MAC_PERFECT_MATCH:
4124 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
4125 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4127 case RTE_MACVLAN_PERFECT_MATCH:
4128 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
4130 case RTE_MAC_HASH_MATCH:
4131 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
4132 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
4134 case RTE_MACVLAN_HASH_MATCH:
4135 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
4138 PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
4139 ret = I40E_ERR_PARAM;
4143 req_list[i].queue_number = 0;
4145 req_list[i].flags = rte_cpu_to_le_16(flags);
4148 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
4150 if (ret != I40E_SUCCESS) {
4151 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
4155 } while (num < total);
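/*
 * Illustrative sketch (compiled out): the batching arithmetic used above.
 * With an admin queue buffer that holds ele_num elements, a list of 'total'
 * filters is sent in ceil(total / ele_num) commands and the last batch
 * carries the remainder. The values below are made up for illustration.
 */
#ifdef I40E_MACVLAN_BATCH_EXAMPLE
static void
example_macvlan_batching(void)
{
	int ele_num = 112;	/* hypothetical: asq_buf_size / element size */
	int total = 300;	/* hypothetical number of filters to program */
	int num = 0, actual_num;

	do {
		/* same expression as the driver uses above */
		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
		/* batches here are 112, 112 and 76 elements */
		num += actual_num;
	} while (num < total);
}
#endif /* I40E_MACVLAN_BATCH_EXAMPLE */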
static int
i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
			    struct i40e_macvlan_filter *filter,
			    int total)
{
	int ele_num, ele_buff_size;
	int num, actual_num, i;
	uint16_t flags;
	int ret = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
	struct i40e_aqc_remove_macvlan_element_data *req_list;

	if (filter == NULL || total == 0)
		return I40E_ERR_PARAM;

	ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
	ele_buff_size = hw->aq.asq_buf_size;

	req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
	if (req_list == NULL) {
		PMD_DRV_LOG(ERR, "Fail to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	num = 0;
	do {
		actual_num = (num + ele_num > total) ? (total - num) : ele_num;
		memset(req_list, 0, ele_buff_size);

		for (i = 0; i < actual_num; i++) {
			(void)rte_memcpy(req_list[i].mac_addr,
				&filter[num + i].macaddr, ETH_ADDR_LEN);
			req_list[i].vlan_tag =
				rte_cpu_to_le_16(filter[num + i].vlan_id);

			switch (filter[num + i].filter_type) {
			case RTE_MAC_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_PERFECT_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
				break;
			case RTE_MAC_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
					I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				break;
			case RTE_MACVLAN_HASH_MATCH:
				flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
				break;
			default:
				PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
				ret = I40E_ERR_PARAM;
				goto DONE;
			}
			req_list[i].flags = rte_cpu_to_le_16(flags);
		}

		ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
						actual_num, NULL);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
			goto DONE;
		}
		num += actual_num;
	} while (num < total);

DONE:
	rte_free(req_list);
	return ret;
}
/* Find out specific MAC filter */
static struct i40e_mac_filter *
i40e_find_mac_filter(struct i40e_vsi *vsi,
			 struct ether_addr *macaddr)
{
	struct i40e_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static bool
i40e_find_vlan_filter(struct i40e_vsi *vsi,
			 uint16_t vlan_id)
{
	uint32_t vid_idx, vid_bit;

	if (vlan_id > ETH_VLAN_ID_MAX)
		return 0;

	vid_idx = I40E_VFTA_IDX(vlan_id);
	vid_bit = I40E_VFTA_BIT(vlan_id);

	if (vsi->vfta[vid_idx] & vid_bit)
		return 1;
	else
		return 0;
}
static void
i40e_set_vlan_filter(struct i40e_vsi *vsi,
			 uint16_t vlan_id, bool on)
{
	uint32_t vid_idx, vid_bit;

	if (vlan_id > ETH_VLAN_ID_MAX)
		return;

	vid_idx = I40E_VFTA_IDX(vlan_id);
	vid_bit = I40E_VFTA_BIT(vlan_id);

	if (on)
		vsi->vfta[vid_idx] |= vid_bit;
	else
		vsi->vfta[vid_idx] &= ~vid_bit;
}
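/*
 * Illustrative sketch (compiled out): the VFTA bitmap math used by the two
 * helpers above, assuming the usual definitions I40E_VFTA_IDX(v) == (v) >> 5
 * and I40E_VFTA_BIT(v) == 1 << ((v) & 0x1F), i.e. one bit per VLAN ID in an
 * array of 32-bit words.
 */
#ifdef I40E_VFTA_EXAMPLE
static void
example_vfta_math(void)
{
	uint32_t vfta[I40E_VFTA_SIZE] = {0};
	uint16_t vlan_id = 100;

	/* VLAN 100 lives in word 3 (100 / 32), bit 4 (100 % 32) */
	vfta[I40E_VFTA_IDX(vlan_id)] |= I40E_VFTA_BIT(vlan_id);

	/* membership test mirrors i40e_find_vlan_filter() */
	if (vfta[I40E_VFTA_IDX(vlan_id)] & I40E_VFTA_BIT(vlan_id))
		PMD_DRV_LOG(DEBUG, "VLAN %u is set", vlan_id);
}
#endif /* I40E_VFTA_EXAMPLE */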
/**
 * Find all vlan options for a specific mac addr,
 * return with the actual number of vlans found.
 */
static inline int
i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
			   struct i40e_macvlan_filter *mv_f,
			   int num, struct ether_addr *addr)
{
	int i;
	uint32_t j, k;

	/*
	 * Do not use i40e_find_vlan_filter, to decrease the loop time,
	 * although the code looks complex.
	 */
	if (num < vsi->vlan_num)
		return I40E_ERR_PARAM;

	i = 0;
	for (j = 0; j < I40E_VFTA_SIZE; j++) {
		if (vsi->vfta[j]) {
			for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
				if (vsi->vfta[j] & (1 << k)) {
					if (i > num - 1) {
						PMD_DRV_LOG(ERR, "vlan number "
							    "does not match");
						return I40E_ERR_PARAM;
					}
					(void)rte_memcpy(&mv_f[i].macaddr,
							addr, ETH_ADDR_LEN);
					mv_f[i].vlan_id =
						j * I40E_UINT32_BIT_SIZE + k;
					i++;
				}
			}
		}
	}
	return I40E_SUCCESS;
}
static inline int
i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
			   struct i40e_macvlan_filter *mv_f,
			   int num,
			   uint16_t vlan)
{
	int i = 0;
	struct i40e_mac_filter *f;

	if (num < vsi->mac_num)
		return I40E_ERR_PARAM;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (i > num - 1) {
			PMD_DRV_LOG(ERR, "buffer number does not match");
			return I40E_ERR_PARAM;
		}
		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
				ETH_ADDR_LEN);
		mv_f[i].vlan_id = vlan;
		mv_f[i].filter_type = f->mac_info.filter_type;
		i++;
	}

	return I40E_SUCCESS;
}
static int
i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
{
	int i, num;
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int ret = I40E_SUCCESS;

	if (vsi == NULL || vsi->mac_num == 0)
		return I40E_ERR_PARAM;

	/* Case that no vlan is set */
	if (vsi->vlan_num == 0)
		num = vsi->mac_num;
	else
		num = vsi->mac_num * vsi->vlan_num;

	mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	i = 0;
	if (vsi->vlan_num == 0) {
		TAILQ_FOREACH(f, &vsi->mac_list, next) {
			(void)rte_memcpy(&mv_f[i].macaddr,
				&f->mac_info.mac_addr, ETH_ADDR_LEN);
			mv_f[i].vlan_id = 0;
			i++;
		}
	} else {
		TAILQ_FOREACH(f, &vsi->mac_list, next) {
			ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
					vsi->vlan_num, &f->mac_info.mac_addr);
			if (ret != I40E_SUCCESS)
				goto DONE;
			i += vsi->vlan_num;
		}
	}

	ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
DONE:
	rte_free(mv_f);

	return ret;
}
int
i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	if (!vsi || vlan > ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If it's already set, just return */
	if (i40e_find_vlan_filter(vsi, vlan))
		return I40E_SUCCESS;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	i40e_set_vlan_filter(vsi, vlan, 1);

	vsi->vlan_num++;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
int
i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
{
	struct i40e_macvlan_filter *mv_f;
	int mac_num;
	int ret = I40E_SUCCESS;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
		return I40E_ERR_PARAM;

	/* If can't find it, just return */
	if (!i40e_find_vlan_filter(vsi, vlan))
		return I40E_ERR_PARAM;

	mac_num = vsi->mac_num;

	if (mac_num == 0) {
		PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
		return I40E_ERR_PARAM;
	}

	mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);

	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);

	if (ret != I40E_SUCCESS)
		goto DONE;

	ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);

	if (ret != I40E_SUCCESS)
		goto DONE;

	/* This is last vlan to remove, replace all mac filter with vlan 0 */
	if (vsi->vlan_num == 1) {
		ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
		if (ret != I40E_SUCCESS)
			goto DONE;

		ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	i40e_set_vlan_filter(vsi, vlan, 0);

	vsi->vlan_num--;
	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
int
i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num = 0;
	int ret = I40E_SUCCESS;

	/* If it's already configured, just return */
	f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
	if (f != NULL)
		return I40E_SUCCESS;
	if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
		(mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {

		/**
		 * If vlan_num is 0, that's the first time to add mac,
		 * set mask for vlan_id 0.
		 */
		if (vsi->vlan_num == 0) {
			i40e_set_vlan_filter(vsi, 0, 1);
			vsi->vlan_num = 1;
		}
		vlan_num = vsi->vlan_num;
	} else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
			(mac_filter->filter_type == RTE_MAC_HASH_MATCH))
		vlan_num = 1;

	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = mac_filter->filter_type;
		(void)rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
				ETH_ADDR_LEN);
	}

	if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
					&mac_filter->mac_addr);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Add the mac addr into mac list */
	f = rte_zmalloc("macv_filter", sizeof(*f), 0);
	if (f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = I40E_ERR_NO_MEMORY;
		goto DONE;
	}
	(void)rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
			ETH_ADDR_LEN);
	f->mac_info.filter_type = mac_filter->filter_type;
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);

	return ret;
}
int
i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
{
	struct i40e_mac_filter *f;
	struct i40e_macvlan_filter *mv_f;
	int i, vlan_num;
	enum rte_mac_filter_type filter_type;
	int ret = I40E_SUCCESS;

	/* Can't find it, return an error */
	f = i40e_find_mac_filter(vsi, addr);
	if (f == NULL)
		return I40E_ERR_PARAM;

	vlan_num = vsi->vlan_num;
	filter_type = f->mac_info.filter_type;
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
		filter_type == RTE_MACVLAN_HASH_MATCH) {
		if (vlan_num == 0) {
			PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
			return I40E_ERR_PARAM;
		}
	} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
			filter_type == RTE_MAC_HASH_MATCH)
		vlan_num = 1;

	mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
	if (mv_f == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}

	for (i = 0; i < vlan_num; i++) {
		mv_f[i].filter_type = filter_type;
		(void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
				ETH_ADDR_LEN);
	}
	if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
			filter_type == RTE_MACVLAN_HASH_MATCH) {
		ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
		if (ret != I40E_SUCCESS)
			goto DONE;
	}

	ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
	if (ret != I40E_SUCCESS)
		goto DONE;

	/* Remove the mac addr from the mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = I40E_SUCCESS;
DONE:
	rte_free(mv_f);
	return ret;
}
/* Configure hash enable flags for RSS */
uint64_t
i40e_config_hena(uint64_t flags)
{
	uint64_t hena = 0;

	if (!flags)
		return hena;

	if (flags & ETH_RSS_FRAG_IPV4)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
	if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	if (flags & ETH_RSS_FRAG_IPV6)
		hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
	if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
	if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
	if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
	if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
		hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
	if (flags & ETH_RSS_L2_PAYLOAD)
		hena |= 1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD;

	return hena;
}
/* Parse the hash enable flags */
uint64_t
i40e_parse_hena(uint64_t flags)
{
	uint64_t rss_hf = 0;

	if (!flags)
		return rss_hf;

	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4))
		rss_hf |= ETH_RSS_FRAG_IPV4;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
		rss_hf |= ETH_RSS_NONFRAG_IPV4_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6))
		rss_hf |= ETH_RSS_FRAG_IPV6;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
		rss_hf |= ETH_RSS_NONFRAG_IPV6_OTHER;
	if (flags & (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD))
		rss_hf |= ETH_RSS_L2_PAYLOAD;

	return rss_hf;
}
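/*
 * Illustrative sketch (compiled out): i40e_config_hena() and
 * i40e_parse_hena() are inverses over the RSS offloads this driver
 * supports, so a flag set survives a round trip through the hardware
 * PCTYPE bit layout.
 */
#ifdef I40E_HENA_EXAMPLE
static void
example_hena_roundtrip(void)
{
	uint64_t rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_FRAG_IPV6;
	uint64_t hena = i40e_config_hena(rss_hf);

	/* parse back: expect exactly the two flags set above */
	if (i40e_parse_hena(hena) != rss_hf)
		PMD_DRV_LOG(ERR, "unexpected hena round-trip result");
}
#endif /* I40E_HENA_EXAMPLE */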
/* Disable RSS */
static void
i40e_pf_disable_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t hena;

	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);
}
static int
i40e_hw_rss_hash_set(struct i40e_hw *hw, struct rte_eth_rss_conf *rss_conf)
{
	uint32_t *hash_key;
	uint8_t hash_key_len;
	uint64_t rss_hf;
	uint16_t i;
	uint64_t hena;

	hash_key = (uint32_t *)(rss_conf->rss_key);
	hash_key_len = rss_conf->rss_key_len;
	if (hash_key != NULL && hash_key_len >=
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Fill in RSS hash key */
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]);
	}

	rss_hf = rss_conf->rss_hf;
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	hena &= ~I40E_RSS_HENA_ALL;
	hena |= i40e_config_hena(rss_hf);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
	I40E_WRITE_FLUSH(hw);

	return 0;
}
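/*
 * Illustrative sketch (compiled out): a key shorter than
 * (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4 bytes is silently skipped by
 * i40e_hw_rss_hash_set() above, so callers should size rss_key to cover
 * the full HKEY register file. The key contents here are arbitrary.
 */
#ifdef I40E_RSS_KEY_EXAMPLE
static void
example_rss_key_sizing(struct i40e_hw *hw)
{
	static uint8_t key[(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)];
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
	};

	(void)i40e_hw_rss_hash_set(hw, &conf);
}
#endif /* I40E_RSS_KEY_EXAMPLE */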
static int
i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
	uint64_t hena;

	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
		if (rss_hf != 0) /* Enable RSS */
			return -EINVAL;
		return 0; /* Nothing to do */
	}
	/* RSS enabled */
	if (rss_hf == 0) /* Disable RSS */
		return -EINVAL;

	return i40e_hw_rss_hash_set(hw, rss_conf);
}
static int
i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
	uint64_t hena;
	uint16_t i;

	if (hash_key != NULL) {
		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			hash_key[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i));
		rss_conf->rss_key_len = i * sizeof(uint32_t);
	}
	hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
	hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
	rss_conf->rss_hf = i40e_parse_hena(hena);

	return 0;
}
static int
i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
{
	switch (filter_type) {
	case RTE_TUNNEL_FILTER_IMAC_IVLAN:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
		break;
	case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_IMAC_TENID:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
		break;
	case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
		break;
	case ETH_TUNNEL_FILTER_IMAC:
		*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid tunnel filter type");
		return -EINVAL;
	}

	return 0;
}
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
			struct rte_eth_tunnel_filter_conf *tunnel_filter,
			uint8_t add)
{
	uint16_t ip_type;
	uint8_t tun_type = 0;
	int val, ret = 0;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
	struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;

	cld_filter = rte_zmalloc("tunnel_filter",
		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
		0);

	if (NULL == cld_filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
		return -EINVAL;
	}
	pfilter = cld_filter;

	(void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
			sizeof(struct ether_addr));
	(void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
			sizeof(struct ether_addr));

	pfilter->inner_vlan = tunnel_filter->inner_vlan;
	if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
		(void)rte_memcpy(&pfilter->ipaddr.v4.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v4.data));
	} else {
		ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
		(void)rte_memcpy(&pfilter->ipaddr.v6.data,
				&tunnel_filter->ip_addr,
				sizeof(pfilter->ipaddr.v6.data));
	}

	/* check tunneled type */
	switch (tunnel_filter->tunnel_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
		break;
	case RTE_TUNNEL_TYPE_NVGRE:
		tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
		break;
	default:
		/* Other tunnel types are not supported. */
		PMD_DRV_LOG(ERR, "tunnel type is not supported.");
		rte_free(cld_filter);
		return -EINVAL;
	}

	val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
						&pfilter->flags);
	if (val < 0) {
		rte_free(cld_filter);
		return -EINVAL;
	}

	pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type |
		(tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
	pfilter->tenant_id = tunnel_filter->tenant_id;
	pfilter->queue_number = tunnel_filter->queue_id;

	if (add)
		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						cld_filter, 1);

	rte_free(cld_filter);
	return ret;
}
static int
i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
{
	uint8_t i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->vxlan_ports[i] == port)
			return i;
	}

	return -1;
}
static int
i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
	int idx, ret;
	uint8_t filter_idx;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	idx = i40e_get_vxlan_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= 0) {
		PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
		return -EINVAL;
	}

	/* Now check if there is space to add the new port */
	idx = i40e_get_vxlan_port_idx(pf, 0);
	if (idx < 0) {
		PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached, "
			"not adding port %d", port);
		return -ENOSPC;
	}

	ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
					&filter_idx, NULL);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
		return -1;
	}

	PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
			port, filter_idx);

	/* New port: add it and mark its index in the bitmap */
	pf->vxlan_ports[idx] = port;
	pf->vxlan_bitmap |= (1 << idx);

	if (!(pf->flags & I40E_FLAG_VXLAN))
		pf->flags |= I40E_FLAG_VXLAN;

	return 0;
}
static int
i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
{
	int idx;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);

	if (!(pf->flags & I40E_FLAG_VXLAN)) {
		PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
		return -EINVAL;
	}

	idx = i40e_get_vxlan_port_idx(pf, port);

	if (idx < 0) {
		PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
		return -EINVAL;
	}

	if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
		PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
		return -1;
	}

	PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
			port, idx);

	pf->vxlan_ports[idx] = 0;
	pf->vxlan_bitmap &= ~(1 << idx);

	if (!pf->vxlan_bitmap)
		pf->flags &= ~I40E_FLAG_VXLAN;

	return 0;
}
/* Add UDP tunneling port */
static int
i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
		break;

	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -1;
		break;

	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -1;
		break;
	}

	return ret;
}
/* Remove UDP tunneling port */
static int
i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -1;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -1;
		break;
	}

	return ret;
}
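/*
 * Illustrative sketch (compiled out): the VXLAN port table above is a fixed
 * array where the value 0 marks a free slot, plus a bitmap of used slots.
 * Adding a port is "look up the port (reject duplicates), then look up a
 * zero slot". The port number is the IANA-assigned VXLAN UDP port.
 */
#ifdef I40E_VXLAN_SLOT_EXAMPLE
static void
example_vxlan_slots(struct i40e_pf *pf)
{
	uint16_t port = 4789;
	int idx;

	if (i40e_get_vxlan_port_idx(pf, port) >= 0)
		return;		/* already offloaded */

	idx = i40e_get_vxlan_port_idx(pf, 0);	/* find a free slot */
	if (idx >= 0) {
		pf->vxlan_ports[idx] = port;
		pf->vxlan_bitmap |= (1 << idx);
	}
}
#endif /* I40E_VXLAN_SLOT_EXAMPLE */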
/* Calculate the maximum number of contiguous PF queues that are configured */
static int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
	struct rte_eth_dev_data *data = pf->dev_data;
	int i, num;
	struct i40e_rx_queue *rxq;

	num = 0;
	for (i = 0; i < pf->lan_nb_qps; i++) {
		rxq = data->rx_queues[i];
		if (rxq && rxq->q_set)
			num++;
		else
			break;
	}

	return num;
}
/* Configure RSS */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, lut = 0;
	uint16_t j, num;

	/*
	 * If both VMDQ and RSS are enabled, not all of the PF queues are
	 * configured. It's necessary to calculate the actual PF queues that
	 * are configured.
	 */
	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		num = i40e_pf_calc_configured_queues_num(pf);
		num = i40e_align_floor(num);
	} else
		num = i40e_align_floor(pf->dev_data->nb_rx_queues);

	PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
			num);

	if (num == 0) {
		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | (j & ((0x1 <<
			hw->func_caps.rss_table_entry_width) - 1));
		if ((i & 3) == 3)
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
	}

	rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
		i40e_pf_disable_rss(pf);
		return 0;
	}
	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
		(I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
		/* Random default keys */
		static uint32_t rss_key_default[] = {0x6b793944,
			0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
			0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
			0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};

		rss_conf.rss_key = (uint8_t *)rss_key_default;
		rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
							sizeof(uint32_t);
	}

	return i40e_hw_rss_hash_set(hw, &rss_conf);
}
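/*
 * Illustrative sketch (compiled out): the LUT loop above packs four 8-bit
 * queue indexes per 32-bit HLUT register, wrapping j over the configured
 * queue count and writing one register per four iterations. The table size
 * and queue count below are made up for illustration.
 */
#ifdef I40E_RSS_LUT_EXAMPLE
static void
example_rss_lut_packing(void)
{
	uint32_t lut = 0;
	uint32_t regs[4];	/* stands in for I40E_PFQF_HLUT(0..3) */
	uint16_t num = 6;	/* hypothetical number of RX queues */
	uint32_t i;
	uint16_t j;

	for (i = 0, j = 0; i < 16; i++, j++) {
		if (j == num)
			j = 0;
		lut = (lut << 8) | j;
		if ((i & 3) == 3)
			regs[i >> 2] = lut;	/* queues repeat 0..5,0..5 */
	}
	(void)regs;
}
#endif /* I40E_RSS_LUT_EXAMPLE */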
static int
i40e_tunnel_filter_param_check(struct i40e_pf *pf,
		struct rte_eth_tunnel_filter_conf *filter)
{
	if (pf == NULL || filter == NULL) {
		PMD_DRV_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}

	if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
		PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
		return -EINVAL;
	}

	if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
		(is_zero_ether_addr(filter->outer_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
		return -EINVAL;
	}

	if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
		(is_zero_ether_addr(filter->inner_mac))) {
		PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
		return -EINVAL;
	}

	return 0;
}
static int
i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
			void *arg)
{
	struct rte_eth_tunnel_filter_conf *filter;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = I40E_SUCCESS;

	filter = (struct rte_eth_tunnel_filter_conf *)(arg);

	if (i40e_tunnel_filter_param_check(pf, filter) < 0)
		return I40E_ERR_PARAM;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		if (!(pf->flags & I40E_FLAG_VXLAN))
			ret = I40E_NOT_SUPPORTED;
		break;	/* a NOP must not fall through and add the filter */
	case RTE_ETH_FILTER_ADD:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
		ret = I40E_ERR_PARAM;
		break;
	}

	return ret;
}
static int
i40e_pf_config_mq_rx(struct i40e_pf *pf)
{
	int ret = 0;
	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;

	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
		PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
		return -ENOTSUP;
	}

	/* RSS setup */
	if (mq_mode & ETH_MQ_RX_RSS_FLAG)
		ret = i40e_pf_config_rss(pf);
	else
		i40e_pf_disable_rss(pf);

	return ret;
}
/* Get the symmetric hash enable configurations per port */
static void
i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
{
	uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);

	*enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
}
/* Set the symmetric hash enable configurations per port */
static void
i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
{
	uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0);

	if (enable > 0) {
		if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
			PMD_DRV_LOG(INFO, "Symmetric hash has already "
							"been enabled");
			return;
		}
		reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
	} else {
		if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
			PMD_DRV_LOG(INFO, "Symmetric hash has already "
							"been disabled");
			return;
		}
		reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
	}
	I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg);
	I40E_WRITE_FLUSH(hw);
}
/*
 * Get global configurations of hash function type and symmetric hash enable
 * per flow type (pctype). Note that the global configuration affects all
 * the ports on the same NIC.
 */
static int
i40e_get_hash_filter_global_config(struct i40e_hw *hw,
				   struct rte_eth_hash_global_conf *g_cfg)
{
	uint32_t reg, mask = I40E_FLOW_TYPES;
	uint16_t i;
	enum i40e_filter_pctype pctype;

	memset(g_cfg, 0, sizeof(*g_cfg));
	reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
	if (reg & I40E_GLQF_CTL_HTOEP_MASK)
		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	else
		g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
	PMD_DRV_LOG(DEBUG, "Hash function is %s",
		(reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");

	for (i = 0; mask && i < RTE_ETH_FLOW_MAX; i++) {
		if (!(mask & (1UL << i)))
			continue;
		mask &= ~(1UL << i);
		/* A set bit indicates the corresponding flow type is supported */
		g_cfg->valid_bit_mask[0] |= (1UL << i);
		pctype = i40e_flowtype_to_pctype(i);
		reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype));
		if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK)
			g_cfg->sym_hash_enable_mask[0] |= (1UL << i);
	}

	return 0;
}
static int
i40e_hash_global_config_check(struct rte_eth_hash_global_conf *g_cfg)
{
	uint32_t i;
	uint32_t mask0, i40e_mask = I40E_FLOW_TYPES;

	if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
		g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
		PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
						g_cfg->hash_func);
		return -EINVAL;
	}

	/*
	 * As i40e supports less than 32 flow types, only the first 32 bits
	 * need to be checked.
	 */
	mask0 = g_cfg->valid_bit_mask[0];
	for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
		if (i == 0) {
			/* Check if any unsupported flow type configured */
			if ((mask0 | i40e_mask) ^ i40e_mask)
				goto mask_err;
		} else {
			if (g_cfg->valid_bit_mask[i])
				goto mask_err;
		}
	}

	return 0;

mask_err:
	PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");

	return -EINVAL;
}
/*
 * Set global configurations of hash function type and symmetric hash enable
 * per flow type (pctype). Note that modifying the global configuration will
 * affect all the ports on the same NIC.
 */
static int
i40e_set_hash_filter_global_config(struct i40e_hw *hw,
				   struct rte_eth_hash_global_conf *g_cfg)
{
	int ret;
	uint16_t i;
	uint32_t reg;
	uint32_t mask0 = g_cfg->valid_bit_mask[0];
	enum i40e_filter_pctype pctype;

	/* Check the input parameters */
	ret = i40e_hash_global_config_check(g_cfg);
	if (ret < 0)
		return ret;

	for (i = 0; mask0 && i < UINT32_BIT; i++) {
		if (!(mask0 & (1UL << i)))
			continue;
		mask0 &= ~(1UL << i);
		pctype = i40e_flowtype_to_pctype(i);
		reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
				I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
		I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg);
	}

	reg = I40E_READ_REG(hw, I40E_GLQF_CTL);
	if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
		/* Toeplitz */
		if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
			PMD_DRV_LOG(DEBUG, "Hash function already set to "
								"Toeplitz");
			goto out;
		}
		reg |= I40E_GLQF_CTL_HTOEP_MASK;
	} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
		/* Simple XOR */
		if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
			PMD_DRV_LOG(DEBUG, "Hash function already set to "
							"Simple XOR");
			goto out;
		}
		reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
	} else
		/* Use the default, and keep it as it is */
		goto out;

	I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg);

out:
	I40E_WRITE_FLUSH(hw);

	return 0;
}
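/*
 * Illustrative sketch (compiled out): enabling symmetric Toeplitz hashing
 * for one flow type through the setter above. Bit i of valid_bit_mask[0]
 * selects flow type i, and the matching bit of sym_hash_enable_mask[0]
 * turns symmetric hashing on for it.
 */
#ifdef I40E_SYM_HASH_EXAMPLE
static void
example_sym_hash_global(struct i40e_hw *hw)
{
	struct rte_eth_hash_global_conf g_cfg;

	memset(&g_cfg, 0, sizeof(g_cfg));
	g_cfg.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	g_cfg.valid_bit_mask[0] = 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	g_cfg.sym_hash_enable_mask[0] = 1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;

	(void)i40e_set_hash_filter_global_config(hw, &g_cfg);
}
#endif /* I40E_SYM_HASH_EXAMPLE */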
static int
i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
{
	int ret = 0;

	if (!hw || !info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
		i40e_get_symmetric_hash_enable_per_port(hw,
					&(info->info.enable));
		break;
	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
		ret = i40e_get_hash_filter_global_config(hw,
				&(info->info.global_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
							info->info_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
{
	int ret = 0;

	if (!hw || !info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
		i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
		break;
	case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
		ret = i40e_set_hash_filter_global_config(hw,
				&(info->info.global_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
							info->info_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Operations for hash function */
static int
i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
		      enum rte_filter_op filter_op,
		      void *arg)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;

	switch (filter_op) {
	case RTE_ETH_FILTER_NOP:
		break;
	case RTE_ETH_FILTER_GET:
		ret = i40e_hash_filter_get(hw,
			(struct rte_eth_hash_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_hash_filter_set(hw,
			(struct rte_eth_hash_filter_info *)arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
								filter_op);
		ret = -ENOTSUP;
		break;
	}

	return ret;
}
/*
 * Configure an ethertype filter, which can direct packets by filtering
 * on MAC address and ether_type, or on ether_type alone.
 */
static int
i40e_ethertype_filter_set(struct i40e_pf *pf,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret;

	if (filter->queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" control packet filter.", filter->ether_type);
		return -EINVAL;
	}
	if (filter->ether_type == ETHER_TYPE_VLAN)
		PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
			" not supported.");

	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
			filter->mac_addr.addr_bytes,
			filter->ether_type, flags,
			pf->main_vsi->seid,
			filter->queue, add, &stats, NULL);

	PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
			 " mac_etype_used = %u, etype_used = %u,"
			 " mac_etype_free = %u, etype_free = %u\n",
			 ret, stats.mac_etype_used, stats.etype_used,
			 stats.mac_etype_free, stats.etype_free);
	if (ret < 0)
		return -ENOSYS;
	return 0;
}
/*
 * Handle operations for ethertype filter.
 */
static int
i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return ret;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_ethertype_filter_set(pf,
			(struct rte_eth_ethertype_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_ethertype_filter_set(pf,
			(struct rte_eth_ethertype_filter *)arg,
			FALSE);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
		ret = -ENOSYS;
		break;
	}
	return ret;
}
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	if (dev == NULL)
		return -EINVAL;

	switch (filter_type) {
	case RTE_ETH_FILTER_HASH:
		ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_MACVLAN:
		ret = i40e_mac_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
							filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
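/*
 * Illustrative sketch (compiled out): how an application reaches the
 * dispatcher above through the generic filter API of this DPDK release.
 * The port, queue and ethertype values here are hypothetical.
 */
#ifdef I40E_FILTER_CTRL_EXAMPLE
static void
example_ethertype_via_filter_ctrl(uint8_t port_id)
{
	struct rte_eth_ethertype_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x88F7;	/* e.g. PTP over Ethernet */
	filter.flags = 0;		/* no MAC match, no drop: to queue */
	filter.queue = 0;

	(void)rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				      RTE_ETH_FILTER_ADD, &filter);
}
#endif /* I40E_FILTER_CTRL_EXAMPLE */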
/*
 * As some registers won't be reset without a global hardware reset,
 * hardware initialization is needed to put those registers into an
 * expected initial state.
 */
static void
i40e_hw_init(struct i40e_hw *hw)
{
	/* clear the PF Queue Filter control register */
	I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0);

	/* Disable symmetric hash per port */
	i40e_set_symmetric_hash_enable_per_port(hw, 0);
}
enum i40e_filter_pctype
i40e_flowtype_to_pctype(uint16_t flow_type)
{
	static const enum i40e_filter_pctype pctype_table[] = {
		[RTE_ETH_FLOW_FRAG_IPV4] = I40E_FILTER_PCTYPE_FRAG_IPV4,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
			I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
			I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
			I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
			I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
		[RTE_ETH_FLOW_FRAG_IPV6] = I40E_FILTER_PCTYPE_FRAG_IPV6,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
			I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
			I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
			I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
			I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
		[RTE_ETH_FLOW_L2_PAYLOAD] = I40E_FILTER_PCTYPE_L2_PAYLOAD,
	};

	return pctype_table[flow_type];
}
uint16_t
i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
{
	static const uint16_t flowtype_table[] = {
		[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
			RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
			RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
			RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
			RTE_ETH_FLOW_NONFRAG_IPV4_OTHER,
		[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
			RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
			RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
			RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
			RTE_ETH_FLOW_NONFRAG_IPV6_OTHER,
		[I40E_FILTER_PCTYPE_L2_PAYLOAD] = RTE_ETH_FLOW_L2_PAYLOAD,
	};

	return flowtype_table[pctype];
}
/*
 * On X710, performance numbers are far from the expectation on recent
 * firmware versions. On XL710, the same applies if promiscuous mode is
 * disabled, or if promiscuous mode is enabled and the port MAC address is
 * equal to the packet destination MAC address. The fix for this issue may
 * not be integrated in the following firmware version. So the workaround
 * in the software driver is needed. It needs to modify the initial values
 * of 3 internal-only registers for both X710 and XL710. Note that the
 * values for X710 and XL710 could be different, and the workaround can be
 * removed when it is fixed in firmware in the future.
 */

/* For both X710 and XL710 */
#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
#define I40E_GL_SWR_PRI_JOIN_MAP_0       0x26CE00

#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2       0x26CE08

/* For X710 */
#define I40E_GL_SWR_PM_UP_THR_EF_VALUE   0x03030303
/* For XL710 */
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE   0x06060606
#define I40E_GL_SWR_PM_UP_THR            0x269FBC
static void
i40e_configure_registers(struct i40e_hw *hw)
{
	static struct {
		uint32_t addr;
		uint64_t val;
	} reg_table[] = {
		{I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
		{I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
		{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
	};
	uint64_t reg;
	uint32_t i;
	int ret;

	for (i = 0; i < RTE_DIM(reg_table); i++) {
		if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
			if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_SF_VALUE;
			else /* For X710 */
				reg_table[i].val =
					I40E_GL_SWR_PM_UP_THR_EF_VALUE;
		}

		ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
							&reg, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
							reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
						reg_table[i].addr, reg);
		if (reg == reg_table[i].val)
			continue;

		ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
						reg_table[i].val, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
				"address of 0x%"PRIx32, reg_table[i].val,
				reg_table[i].addr);
			break;
		}
		PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
			"0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
	}
}
#define I40E_VSI_TSR(_i)            (0x00050800 + ((_i) * 4))
#define I40E_VSI_TSR_QINQ_CONFIG    0xc030
#define I40E_VSI_L2TAGSTXVALID(_i)  (0x00042800 + ((_i) * 4))
#define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
static int
i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
{
	uint32_t reg;
	int ret;

	if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
		PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
		return -EINVAL;
	}

	/* Configure for double VLAN RX stripping */
	reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
	if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
		reg |= I40E_VSI_TSR_QINQ_CONFIG;
		ret = i40e_aq_debug_write_register(hw,
						   I40E_VSI_TSR(vsi->vsi_id),
						   reg, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
				    vsi->vsi_id);
			return I40E_ERR_CONFIG;
		}
	}

	/* Configure for double VLAN TX insertion */
	reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
	if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
		reg = I40E_VSI_L2TAGSTXVALID_QINQ;
		ret = i40e_aq_debug_write_register(hw,
						   I40E_VSI_L2TAGSTXVALID(
						   vsi->vsi_id), reg, NULL);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to update "
				"VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
			return I40E_ERR_CONFIG;
		}
	}

	return 0;
}
/**
 * i40e_aq_add_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to add mirror rule to
 * @dst_id: destination vsi seid
 * @entries: Buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be added
 *
 * Add a mirror rule for a given veb.
 *
 **/
static enum i40e_status_code
i40e_aq_add_mirror_rule(struct i40e_hw *hw,
			uint16_t seid, uint16_t dst_id,
			uint16_t rule_type, uint16_t *entries,
			uint16_t count, uint16_t *rule_id)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_delete_mirror_rule cmd;
	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
		(struct i40e_aqc_add_delete_mirror_rule_completion *)
		&desc.params.raw;
	uint16_t buff_len;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_add_mirror_rule);
	memset(&cmd, 0, sizeof(cmd));

	buff_len = sizeof(uint16_t) * count;
	desc.datalen = rte_cpu_to_le_16(buff_len);
	if (buff_len > 0)
		desc.flags |= rte_cpu_to_le_16(
			(uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
					 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
	cmd.num_entries = rte_cpu_to_le_16(count);
	cmd.seid = rte_cpu_to_le_16(seid);
	cmd.destination = rte_cpu_to_le_16(dst_id);

	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
	status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
	PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
			 "rule_id = %u"
			 " mirror_rules_used = %u, mirror_rules_free = %u,",
			 hw->aq.asq_last_status, resp->rule_id,
			 resp->mirror_rules_used, resp->mirror_rules_free);
	*rule_id = rte_le_to_cpu_16(resp->rule_id);

	return status;
}
/**
 * i40e_aq_del_mirror_rule
 * @hw: pointer to the hardware structure
 * @seid: VEB seid to delete mirror rule from
 * @entries: Buffer which contains the entities to be mirrored
 * @count: number of entities contained in the buffer
 * @rule_id: the rule_id of the rule to be deleted
 *
 * Delete a mirror rule for a given veb.
 *
 **/
static enum i40e_status_code
i40e_aq_del_mirror_rule(struct i40e_hw *hw,
		uint16_t seid, uint16_t rule_type, uint16_t *entries,
		uint16_t count, uint16_t rule_id)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_add_delete_mirror_rule cmd;
	uint16_t buff_len = 0;
	enum i40e_status_code status;
	void *buff = NULL;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_delete_mirror_rule);
	memset(&cmd, 0, sizeof(cmd));
	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
		desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
							  I40E_AQ_FLAG_RD));
		cmd.num_entries = count;
		buff_len = sizeof(uint16_t) * count;
		desc.datalen = rte_cpu_to_le_16(buff_len);
		buff = (void *)entries;
	} else
		/* rule id is filled in destination field for deleting mirror rule */
		cmd.destination = rte_cpu_to_le_16(rule_id);

	cmd.rule_type = rte_cpu_to_le_16(rule_type <<
					 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
	cmd.seid = rte_cpu_to_le_16(seid);

	rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
	status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);

	return status;
}
/**
 * i40e_mirror_rule_set
 * @dev: pointer to the device structure
 * @mirror_conf: mirror rule info
 * @sw_id: mirror rule's sw_id
 * @on: enable/disable
 *
 * set a mirror rule.
 *
 **/
static int
i40e_mirror_rule_set(struct rte_eth_dev *dev,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t sw_id, uint8_t on)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_mirror_rule *it, *mirr_rule = NULL;
	struct i40e_mirror_rule *parent = NULL;
	uint16_t seid, dst_seid, rule_id;
	uint16_t i, j = 0;
	int ret;

	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);

	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
		PMD_DRV_LOG(ERR, "mirror rule cannot be configured"
			" without veb or vfs.");
		return -ENOSYS;
	}
	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
		PMD_DRV_LOG(ERR, "mirror table is full.");
		return -ENOSPC;
	}
	if (mirror_conf->dst_pool > pf->vf_num) {
		PMD_DRV_LOG(ERR, "invalid destination pool %u.",
				mirror_conf->dst_pool);
		return -EINVAL;
	}

	seid = pf->main_vsi->veb->seid;

	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
		if (sw_id <= it->index) {
			mirr_rule = it;
			break;
		}
		parent = it;
	}
	if (mirr_rule && sw_id == mirr_rule->index) {
		if (on) {
			PMD_DRV_LOG(ERR, "mirror rule exists.");
			return -EEXIST;
		} else {
			ret = i40e_aq_del_mirror_rule(hw, seid,
					mirr_rule->rule_type,
					mirr_rule->entries,
					mirr_rule->num_entries, mirr_rule->id);
			if (ret < 0) {
				PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
						" ret = %d, aq_err = %d.",
						ret, hw->aq.asq_last_status);
				return -ENOSYS;
			}
			TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
			rte_free(mirr_rule);
			pf->nb_mirror_rule--;
			return 0;
		}
	} else if (!on) {
		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
		return -ENOENT;
	}

	mirr_rule = rte_zmalloc("i40e_mirror_rule",
				sizeof(struct i40e_mirror_rule), 0);
	if (!mirr_rule) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return I40E_ERR_NO_MEMORY;
	}
	switch (mirror_conf->rule_type) {
	case ETH_MIRROR_VLAN:
		for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
				mirr_rule->entries[j] =
					mirror_conf->vlan.vlan_id[i];
				j++;
			}
		}
		if (j == 0) {
			PMD_DRV_LOG(ERR, "vlan is not specified.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
		break;
	case ETH_MIRROR_VIRTUAL_POOL_UP:
	case ETH_MIRROR_VIRTUAL_POOL_DOWN:
		/* check if the specified pool bit is out of range */
		if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
			PMD_DRV_LOG(ERR, "pool mask is out of range.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		for (i = 0, j = 0; i < pf->vf_num; i++) {
			if (mirror_conf->pool_mask & (1ULL << i)) {
				mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
				j++;
			}
		}
		if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
			/* add pf vsi to entries */
			mirr_rule->entries[j] = pf->main_vsi_seid;
			j++;
		}
		if (j == 0) {
			PMD_DRV_LOG(ERR, "pool is not specified.");
			rte_free(mirr_rule);
			return -EINVAL;
		}
		/* egress and ingress in AQ commands refer to the switch side, not the port */
		mirr_rule->rule_type =
			(mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
			I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
			I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
		break;
	case ETH_MIRROR_UPLINK_PORT:
		/* egress and ingress in AQ commands refer to the switch side, not the port */
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
		break;
	case ETH_MIRROR_DOWNLINK_PORT:
		mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
			mirror_conf->rule_type);
		rte_free(mirr_rule);
		return -EINVAL;
	}

	/* If the dst_pool is equal to vf_num, consider it as PF */
	if (mirror_conf->dst_pool == pf->vf_num)
		dst_seid = pf->main_vsi_seid;
	else
		dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;

	ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
				      mirr_rule->rule_type, mirr_rule->entries,
				      j, &rule_id);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "failed to add mirror rule:"
				" ret = %d, aq_err = %d.",
				ret, hw->aq.asq_last_status);
		rte_free(mirr_rule);
		return -ENOSYS;
	}

	mirr_rule->index = sw_id;
	mirr_rule->num_entries = j;
	mirr_rule->id = rule_id;
	mirr_rule->dst_vsi_seid = dst_seid;

	if (parent)
		TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
	else
		TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);

	pf->nb_mirror_rule++;
	return 0;
}
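/*
 * Illustrative sketch (compiled out): how a pool mask from the mirror
 * configuration becomes the entries[] list sent to firmware above. Bits
 * 0..vf_num-1 select VF VSIs and bit vf_num selects the PF VSI. The seid
 * values and array size here are hypothetical.
 */
#ifdef I40E_MIRROR_MASK_EXAMPLE
static void
example_mirror_pool_mask(void)
{
	uint64_t pool_mask = (1ULL << 0) | (1ULL << 2);	/* VFs 0 and 2 */
	uint16_t vf_seid[4] = {100, 101, 102, 103};	/* hypothetical seids */
	uint16_t entries[8];
	uint16_t i, j = 0;

	for (i = 0; i < 4; i++)
		if (pool_mask & (1ULL << i))
			entries[j++] = vf_seid[i];
	/* j == 2: entries[] = {100, 102} would go out with the AQ command */
	(void)entries;
}
#endif /* I40E_MIRROR_MASK_EXAMPLE */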
/**
 * i40e_mirror_rule_reset
 * @dev: pointer to the device
 * @sw_id: mirror rule's sw_id
 *
 * reset a mirror rule.
 *
 **/
static int
i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_mirror_rule *it, *mirr_rule = NULL;
	uint16_t seid;
	int ret;

	PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);

	seid = pf->main_vsi->veb->seid;

	TAILQ_FOREACH(it, &pf->mirror_list, rules) {
		if (sw_id == it->index) {
			mirr_rule = it;
			break;
		}
	}
	if (mirr_rule) {
		ret = i40e_aq_del_mirror_rule(hw, seid,
				mirr_rule->rule_type,
				mirr_rule->entries,
				mirr_rule->num_entries, mirr_rule->id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
					" status = %d, aq_err = %d.",
					ret, hw->aq.asq_last_status);
			return -ENOSYS;
		}
		TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
		rte_free(mirr_rule);
		pf->nb_mirror_rule--;
	} else {
		PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
		return -ENOENT;
	}
	return 0;
}
static int
i40e_timesync_enable(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link *link = &dev->data->dev_link;
	uint32_t tsync_ctl_l;
	uint32_t tsync_ctl_h;
	uint32_t tsync_inc_l;
	uint32_t tsync_inc_h;

	switch (link->link_speed) {
	case ETH_LINK_SPEED_40G:
		tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
		tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
		break;
	case ETH_LINK_SPEED_10G:
		tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
		tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
		break;
	case ETH_LINK_SPEED_1000:
		tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
		tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
		break;
	default:
		tsync_inc_l = 0x0;
		tsync_inc_h = 0x0;
	}

	/* Clear timesync registers. */
	I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(0));
	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(1));
	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(2));
	I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(3));
	I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);

	/* Set the timesync increment value. */
	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);

	/* Enable timestamping of PTP packets. */
	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
	tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;

	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
	tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
	tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;

	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);

	return 0;
}
static int
i40e_timesync_disable(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl_l;
	uint32_t tsync_ctl_h;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
	tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;

	tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
	tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;

	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
	I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);

	/* Reset the timesync increment value. */
	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
	I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);

	return 0;
}
static int
i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp, uint32_t flags)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t sync_status;
	uint32_t rx_stmpl;
	uint32_t rx_stmph;
	uint32_t index = flags & 0x03;

	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
	if ((sync_status & (1 << index)) == 0)
		return -EINVAL;

	rx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
	rx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index));

	timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
	timestamp->tv_nsec = 0;

	return 0;
}
static int
i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t sync_status;
	uint32_t tx_stmpl;
	uint32_t tx_stmph;

	sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
	if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
		return -EINVAL;

	tx_stmpl = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
	tx_stmph = I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);

	timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
	timestamp->tv_nsec = 0;