/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "base/i40e_dcb.h"
39 #include "i40e_ethdev.h"
41 #include "i40e_rxtx.h"
42 #include "rte_pmd_i40e.h"
45 rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
47 struct rte_eth_dev *dev;
50 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
52 dev = &rte_eth_devices[port];
54 if (!is_i40e_supported(dev))
57 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
59 if (vf >= pf->vf_num || !pf->vfs) {
60 PMD_DRV_LOG(ERR, "Invalid argument.");
64 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
70 rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
72 struct rte_eth_dev *dev;
76 struct i40e_vsi_context ctxt;
79 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
81 dev = &rte_eth_devices[port];
83 if (!is_i40e_supported(dev))
86 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
88 if (vf_id >= pf->vf_num || !pf->vfs) {
89 PMD_DRV_LOG(ERR, "Invalid argument.");
93 vsi = pf->vfs[vf_id].vsi;
95 PMD_DRV_LOG(ERR, "Invalid VSI.");
99 /* Check if it has been already on or off */
100 if (vsi->info.valid_sections &
101 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
103 if ((vsi->info.sec_flags &
104 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
105 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
106 return 0; /* already on */
108 if ((vsi->info.sec_flags &
109 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
110 return 0; /* already off */
114 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
116 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
118 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
120 memset(&ctxt, 0, sizeof(ctxt));
121 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
122 ctxt.seid = vsi->seid;
124 hw = I40E_VSI_TO_HW(vsi);
125 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
126 if (ret != I40E_SUCCESS) {
128 PMD_DRV_LOG(ERR, "Failed to update VSI params");
135 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
139 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
140 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
143 for (j = 0; j < I40E_VFTA_SIZE; j++) {
147 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
148 if (!(vsi->vfta[j] & (1 << k)))
151 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
155 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
157 ret = i40e_aq_add_vlan(hw, vsi->seid,
158 &vlan_data, 1, NULL);
160 ret = i40e_aq_remove_vlan(hw, vsi->seid,
161 &vlan_data, 1, NULL);
162 if (ret != I40E_SUCCESS) {
164 "Failed to add/rm vlan filter");
174 rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
176 struct rte_eth_dev *dev;
178 struct i40e_vsi *vsi;
180 struct i40e_vsi_context ctxt;
183 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
185 dev = &rte_eth_devices[port];
187 if (!is_i40e_supported(dev))
190 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
192 if (vf_id >= pf->vf_num || !pf->vfs) {
193 PMD_DRV_LOG(ERR, "Invalid argument.");
197 vsi = pf->vfs[vf_id].vsi;
199 PMD_DRV_LOG(ERR, "Invalid VSI.");
203 /* Check if it has been already on or off */
204 if (vsi->vlan_anti_spoof_on == on)
205 return 0; /* already on or off */
207 vsi->vlan_anti_spoof_on = on;
208 if (!vsi->vlan_filter_on) {
209 ret = i40e_add_rm_all_vlan_filter(vsi, on);
211 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
216 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
218 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
220 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
222 memset(&ctxt, 0, sizeof(ctxt));
223 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
224 ctxt.seid = vsi->seid;
226 hw = I40E_VSI_TO_HW(vsi);
227 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
228 if (ret != I40E_SUCCESS) {
230 PMD_DRV_LOG(ERR, "Failed to update VSI params");
237 i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
239 struct i40e_mac_filter *f;
240 struct i40e_macvlan_filter *mv_f;
242 enum rte_mac_filter_type filter_type;
243 int ret = I40E_SUCCESS;
246 /* remove all the MACs */
247 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
248 vlan_num = vsi->vlan_num;
249 filter_type = f->mac_info.filter_type;
250 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
251 filter_type == RTE_MACVLAN_HASH_MATCH) {
253 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
254 return I40E_ERR_PARAM;
256 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
257 filter_type == RTE_MAC_HASH_MATCH)
260 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
262 PMD_DRV_LOG(ERR, "failed to allocate memory");
263 return I40E_ERR_NO_MEMORY;
266 for (i = 0; i < vlan_num; i++) {
267 mv_f[i].filter_type = filter_type;
268 rte_memcpy(&mv_f[i].macaddr,
269 &f->mac_info.mac_addr,
272 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
273 filter_type == RTE_MACVLAN_HASH_MATCH) {
274 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
275 &f->mac_info.mac_addr);
276 if (ret != I40E_SUCCESS) {
282 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
283 if (ret != I40E_SUCCESS) {
296 i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
298 struct i40e_mac_filter *f;
299 struct i40e_macvlan_filter *mv_f;
301 int ret = I40E_SUCCESS;
304 /* restore all the MACs */
305 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
306 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
307 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
309 * If vlan_num is 0, that's the first time to add mac,
310 * set mask for vlan_id 0.
312 if (vsi->vlan_num == 0) {
313 i40e_set_vlan_filter(vsi, 0, 1);
316 vlan_num = vsi->vlan_num;
317 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
318 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
321 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
323 PMD_DRV_LOG(ERR, "failed to allocate memory");
324 return I40E_ERR_NO_MEMORY;
327 for (i = 0; i < vlan_num; i++) {
328 mv_f[i].filter_type = f->mac_info.filter_type;
329 rte_memcpy(&mv_f[i].macaddr,
330 &f->mac_info.mac_addr,
334 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
335 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
336 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
337 &f->mac_info.mac_addr);
338 if (ret != I40E_SUCCESS) {
344 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
345 if (ret != I40E_SUCCESS) {
358 i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
360 struct i40e_vsi_context ctxt;
367 hw = I40E_VSI_TO_HW(vsi);
369 /* Use the FW API if FW >= v5.0 */
370 if (hw->aq.fw_maj_ver < 5) {
371 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
375 /* Check if it has been already on or off */
376 if (vsi->info.valid_sections &
377 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
379 if ((vsi->info.switch_id &
380 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
381 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
382 return 0; /* already on */
384 if ((vsi->info.switch_id &
385 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
386 return 0; /* already off */
390 /* remove all the MAC and VLAN first */
391 ret = i40e_vsi_rm_mac_filter(vsi);
393 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
396 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
397 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
399 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
404 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
406 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
408 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
410 memset(&ctxt, 0, sizeof(ctxt));
411 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
412 ctxt.seid = vsi->seid;
414 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
415 if (ret != I40E_SUCCESS) {
416 PMD_DRV_LOG(ERR, "Failed to update VSI params");
420 /* add all the MAC and VLAN back */
421 ret = i40e_vsi_restore_mac_filter(vsi);
424 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
425 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
434 rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
436 struct rte_eth_dev *dev;
438 struct i40e_pf_vf *vf;
439 struct i40e_vsi *vsi;
443 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
445 dev = &rte_eth_devices[port];
447 if (!is_i40e_supported(dev))
450 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
452 /* setup PF TX loopback */
454 ret = i40e_vsi_set_tx_loopback(vsi, on);
458 /* setup TX loopback for all the VFs */
460 /* if no VF, do nothing. */
464 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
465 vf = &pf->vfs[vf_id];
468 ret = i40e_vsi_set_tx_loopback(vsi, on);
477 rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
479 struct rte_eth_dev *dev;
481 struct i40e_vsi *vsi;
485 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
487 dev = &rte_eth_devices[port];
489 if (!is_i40e_supported(dev))
492 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
494 if (vf_id >= pf->vf_num || !pf->vfs) {
495 PMD_DRV_LOG(ERR, "Invalid argument.");
499 vsi = pf->vfs[vf_id].vsi;
501 PMD_DRV_LOG(ERR, "Invalid VSI.");
505 hw = I40E_VSI_TO_HW(vsi);
507 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
509 if (ret != I40E_SUCCESS) {
511 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
518 rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
520 struct rte_eth_dev *dev;
522 struct i40e_vsi *vsi;
526 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
528 dev = &rte_eth_devices[port];
530 if (!is_i40e_supported(dev))
533 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
535 if (vf_id >= pf->vf_num || !pf->vfs) {
536 PMD_DRV_LOG(ERR, "Invalid argument.");
540 vsi = pf->vfs[vf_id].vsi;
542 PMD_DRV_LOG(ERR, "Invalid VSI.");
546 hw = I40E_VSI_TO_HW(vsi);
548 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
550 if (ret != I40E_SUCCESS) {
552 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
559 rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
560 struct ether_addr *mac_addr)
562 struct i40e_mac_filter *f;
563 struct rte_eth_dev *dev;
564 struct i40e_pf_vf *vf;
565 struct i40e_vsi *vsi;
569 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
572 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
574 dev = &rte_eth_devices[port];
576 if (!is_i40e_supported(dev))
579 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
581 if (vf_id >= pf->vf_num || !pf->vfs)
584 vf = &pf->vfs[vf_id];
587 PMD_DRV_LOG(ERR, "Invalid VSI.");
591 ether_addr_copy(mac_addr, &vf->mac_addr);
593 /* Remove all existing mac */
594 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
595 if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
597 PMD_DRV_LOG(WARNING, "Delete MAC failed");
602 /* Set vlan strip on/off for specific VF from host */
604 rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
606 struct rte_eth_dev *dev;
608 struct i40e_vsi *vsi;
611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
613 dev = &rte_eth_devices[port];
615 if (!is_i40e_supported(dev))
618 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
620 if (vf_id >= pf->vf_num || !pf->vfs) {
621 PMD_DRV_LOG(ERR, "Invalid argument.");
625 vsi = pf->vfs[vf_id].vsi;
630 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
631 if (ret != I40E_SUCCESS) {
633 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
639 int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
642 struct rte_eth_dev *dev;
645 struct i40e_vsi *vsi;
646 struct i40e_vsi_context ctxt;
649 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
651 if (vlan_id > ETHER_MAX_VLAN_ID) {
652 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
656 dev = &rte_eth_devices[port];
658 if (!is_i40e_supported(dev))
661 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
662 hw = I40E_PF_TO_HW(pf);
665 * return -ENODEV if SRIOV not enabled, VF number not configured
666 * or no queue assigned.
668 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
672 if (vf_id >= pf->vf_num || !pf->vfs) {
673 PMD_DRV_LOG(ERR, "Invalid VF ID.");
677 vsi = pf->vfs[vf_id].vsi;
679 PMD_DRV_LOG(ERR, "Invalid VSI.");
683 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
684 vsi->info.pvid = vlan_id;
686 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
688 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
690 memset(&ctxt, 0, sizeof(ctxt));
691 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
692 ctxt.seid = vsi->seid;
694 hw = I40E_VSI_TO_HW(vsi);
695 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
696 if (ret != I40E_SUCCESS) {
698 PMD_DRV_LOG(ERR, "Failed to update VSI params");
704 int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
707 struct rte_eth_dev *dev;
709 struct i40e_vsi *vsi;
711 struct i40e_mac_filter_info filter;
712 struct ether_addr broadcast = {
713 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
716 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
719 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
723 dev = &rte_eth_devices[port];
725 if (!is_i40e_supported(dev))
728 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
729 hw = I40E_PF_TO_HW(pf);
731 if (vf_id >= pf->vf_num || !pf->vfs) {
732 PMD_DRV_LOG(ERR, "Invalid VF ID.");
737 * return -ENODEV if SRIOV not enabled, VF number not configured
738 * or no queue assigned.
740 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
741 pf->vf_nb_qps == 0) {
742 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
746 vsi = pf->vfs[vf_id].vsi;
748 PMD_DRV_LOG(ERR, "Invalid VSI.");
753 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
754 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
755 ret = i40e_vsi_add_mac(vsi, &filter);
757 ret = i40e_vsi_delete_mac(vsi, &broadcast);
760 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
762 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
770 int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
772 struct rte_eth_dev *dev;
775 struct i40e_vsi *vsi;
776 struct i40e_vsi_context ctxt;
779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
782 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
786 dev = &rte_eth_devices[port];
788 if (!is_i40e_supported(dev))
791 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
792 hw = I40E_PF_TO_HW(pf);
795 * return -ENODEV if SRIOV not enabled, VF number not configured
796 * or no queue assigned.
798 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
799 pf->vf_nb_qps == 0) {
800 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
804 if (vf_id >= pf->vf_num || !pf->vfs) {
805 PMD_DRV_LOG(ERR, "Invalid VF ID.");
809 vsi = pf->vfs[vf_id].vsi;
811 PMD_DRV_LOG(ERR, "Invalid VSI.");
815 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
817 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
818 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
820 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
821 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
824 memset(&ctxt, 0, sizeof(ctxt));
825 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
826 ctxt.seid = vsi->seid;
828 hw = I40E_VSI_TO_HW(vsi);
829 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
830 if (ret != I40E_SUCCESS) {
832 PMD_DRV_LOG(ERR, "Failed to update VSI params");
839 i40e_vlan_filter_count(struct i40e_vsi *vsi)
845 for (j = 0; j < I40E_VFTA_SIZE; j++) {
849 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
850 if (!(vsi->vfta[j] & (1 << k)))
853 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
864 int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
865 uint64_t vf_mask, uint8_t on)
867 struct rte_eth_dev *dev;
870 struct i40e_vsi *vsi;
872 int ret = I40E_SUCCESS;
874 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
876 dev = &rte_eth_devices[port];
878 if (!is_i40e_supported(dev))
881 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
882 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
887 PMD_DRV_LOG(ERR, "No VF.");
892 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
896 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
897 hw = I40E_PF_TO_HW(pf);
900 * return -ENODEV if SRIOV not enabled, VF number not configured
901 * or no queue assigned.
903 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
904 pf->vf_nb_qps == 0) {
905 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
909 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
910 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
911 vsi = pf->vfs[vf_idx].vsi;
913 if (!vsi->vlan_filter_on) {
914 vsi->vlan_filter_on = true;
915 i40e_aq_set_vsi_vlan_promisc(hw,
919 if (!vsi->vlan_anti_spoof_on)
920 i40e_add_rm_all_vlan_filter(
923 ret = i40e_vsi_add_vlan(vsi, vlan_id);
925 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
927 if (!i40e_vlan_filter_count(vsi)) {
928 vsi->vlan_filter_on = false;
929 i40e_aq_set_vsi_vlan_promisc(hw,
938 if (ret != I40E_SUCCESS) {
940 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
947 rte_pmd_i40e_get_vf_stats(uint16_t port,
949 struct rte_eth_stats *stats)
951 struct rte_eth_dev *dev;
953 struct i40e_vsi *vsi;
955 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
957 dev = &rte_eth_devices[port];
959 if (!is_i40e_supported(dev))
962 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
964 if (vf_id >= pf->vf_num || !pf->vfs) {
965 PMD_DRV_LOG(ERR, "Invalid VF ID.");
969 vsi = pf->vfs[vf_id].vsi;
971 PMD_DRV_LOG(ERR, "Invalid VSI.");
975 i40e_update_vsi_stats(vsi);
977 stats->ipackets = vsi->eth_stats.rx_unicast +
978 vsi->eth_stats.rx_multicast +
979 vsi->eth_stats.rx_broadcast;
980 stats->opackets = vsi->eth_stats.tx_unicast +
981 vsi->eth_stats.tx_multicast +
982 vsi->eth_stats.tx_broadcast;
983 stats->ibytes = vsi->eth_stats.rx_bytes;
984 stats->obytes = vsi->eth_stats.tx_bytes;
985 stats->ierrors = vsi->eth_stats.rx_discards;
986 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
992 rte_pmd_i40e_reset_vf_stats(uint16_t port,
995 struct rte_eth_dev *dev;
997 struct i40e_vsi *vsi;
999 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1001 dev = &rte_eth_devices[port];
1003 if (!is_i40e_supported(dev))
1006 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1008 if (vf_id >= pf->vf_num || !pf->vfs) {
1009 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1013 vsi = pf->vfs[vf_id].vsi;
1015 PMD_DRV_LOG(ERR, "Invalid VSI.");
1019 vsi->offset_loaded = false;
1020 i40e_update_vsi_stats(vsi);
1026 rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
1028 struct rte_eth_dev *dev;
1030 struct i40e_vsi *vsi;
1035 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1037 dev = &rte_eth_devices[port];
1039 if (!is_i40e_supported(dev))
1042 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1044 if (vf_id >= pf->vf_num || !pf->vfs) {
1045 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1049 vsi = pf->vfs[vf_id].vsi;
1051 PMD_DRV_LOG(ERR, "Invalid VSI.");
1055 if (bw > I40E_QOS_BW_MAX) {
1056 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1061 if (bw % I40E_QOS_BW_GRANULARITY) {
1062 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1063 I40E_QOS_BW_GRANULARITY);
1067 bw /= I40E_QOS_BW_GRANULARITY;
1069 hw = I40E_VSI_TO_HW(vsi);
1072 if (bw == vsi->bw_info.bw_limit) {
1074 "No change for VF max bandwidth. Nothing to do.");
1079 * VF bandwidth limitation and TC bandwidth limitation cannot be
1080 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1082 * If bw is 0, means disable bandwidth limitation. Then no need to
1083 * check TC bandwidth limitation.
1086 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1087 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1088 vsi->bw_info.bw_ets_credits[i])
1091 if (i != I40E_MAX_TRAFFIC_CLASS) {
1093 "TC max bandwidth has been set on this VF,"
1094 " please disable it first.");
1099 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1102 "Failed to set VF %d bandwidth, err(%d).",
1107 /* Store the configuration. */
1108 vsi->bw_info.bw_limit = (uint16_t)bw;
1109 vsi->bw_info.bw_max = 0;
1115 rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
1116 uint8_t tc_num, uint8_t *bw_weight)
1118 struct rte_eth_dev *dev;
1120 struct i40e_vsi *vsi;
1122 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1126 bool b_change = false;
1128 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1130 dev = &rte_eth_devices[port];
1132 if (!is_i40e_supported(dev))
1135 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1137 if (vf_id >= pf->vf_num || !pf->vfs) {
1138 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1142 vsi = pf->vfs[vf_id].vsi;
1144 PMD_DRV_LOG(ERR, "Invalid VSI.");
1148 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1149 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1150 I40E_MAX_TRAFFIC_CLASS);
1155 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1156 if (vsi->enabled_tc & BIT_ULL(i))
1159 if (sum != tc_num) {
1161 "Weight should be set for all %d enabled TCs.",
1167 for (i = 0; i < tc_num; i++) {
1168 if (!bw_weight[i]) {
1170 "The weight should be 1 at least.");
1173 sum += bw_weight[i];
1177 "The summary of the TC weight should be 100.");
1182 * Create the configuration for all the TCs.
1184 memset(&tc_bw, 0, sizeof(tc_bw));
1185 tc_bw.tc_valid_bits = vsi->enabled_tc;
1187 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1188 if (vsi->enabled_tc & BIT_ULL(i)) {
1190 vsi->bw_info.bw_ets_share_credits[i])
1193 tc_bw.tc_bw_credits[i] = bw_weight[j];
1201 "No change for TC allocated bandwidth."
1206 hw = I40E_VSI_TO_HW(vsi);
1208 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1211 "Failed to set VF %d TC bandwidth weight, err(%d).",
1216 /* Store the configuration. */
1218 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1219 if (vsi->enabled_tc & BIT_ULL(i)) {
1220 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
1229 rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
1230 uint8_t tc_no, uint32_t bw)
1232 struct rte_eth_dev *dev;
1234 struct i40e_vsi *vsi;
1236 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1240 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1242 dev = &rte_eth_devices[port];
1244 if (!is_i40e_supported(dev))
1247 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1249 if (vf_id >= pf->vf_num || !pf->vfs) {
1250 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1254 vsi = pf->vfs[vf_id].vsi;
1256 PMD_DRV_LOG(ERR, "Invalid VSI.");
1260 if (bw > I40E_QOS_BW_MAX) {
1261 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1266 if (bw % I40E_QOS_BW_GRANULARITY) {
1267 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1268 I40E_QOS_BW_GRANULARITY);
1272 bw /= I40E_QOS_BW_GRANULARITY;
1274 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1275 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1276 I40E_MAX_TRAFFIC_CLASS);
1280 hw = I40E_VSI_TO_HW(vsi);
1282 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1283 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1289 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1291 "No change for TC max bandwidth. Nothing to do.");
1296 * VF bandwidth limitation and TC bandwidth limitation cannot be
1297 * enabled in parallel, disable VF bandwidth limitation if it's
1299 * If bw is 0, means disable bandwidth limitation. Then no need to
1300 * care about VF bandwidth limitation configuration.
1302 if (bw && vsi->bw_info.bw_limit) {
1303 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1306 "Failed to disable VF(%d)"
1307 " bandwidth limitation, err(%d).",
1313 "VF max bandwidth is disabled according"
1314 " to TC max bandwidth setting.");
1318 * Get all the TCs' info to create a whole picture.
1319 * Because the incremental change isn't permitted.
1321 memset(&tc_bw, 0, sizeof(tc_bw));
1322 tc_bw.tc_valid_bits = vsi->enabled_tc;
1323 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1324 if (vsi->enabled_tc & BIT_ULL(i)) {
1325 tc_bw.tc_bw_credits[i] =
1327 vsi->bw_info.bw_ets_credits[i]);
1330 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1332 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1335 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1340 /* Store the configuration. */
1341 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
1347 rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
1349 struct rte_eth_dev *dev;
1351 struct i40e_vsi *vsi;
1352 struct i40e_veb *veb;
1354 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1358 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1360 dev = &rte_eth_devices[port];
1362 if (!is_i40e_supported(dev))
1365 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1369 PMD_DRV_LOG(ERR, "Invalid VSI.");
1375 PMD_DRV_LOG(ERR, "Invalid VEB.");
1379 if ((tc_map & veb->enabled_tc) != tc_map) {
1381 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1386 if (tc_map == veb->strict_prio_tc) {
1387 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1391 hw = I40E_VSI_TO_HW(vsi);
1393 /* Disable DCBx if it's the first time to set strict priority. */
1394 if (!veb->strict_prio_tc) {
1395 ret = i40e_aq_stop_lldp(hw, true, NULL);
1398 "Failed to disable DCBx as it's already"
1402 "DCBx is disabled according to strict"
1403 " priority setting.");
1406 memset(&ets_data, 0, sizeof(ets_data));
1407 ets_data.tc_valid_bits = veb->enabled_tc;
1408 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1409 ets_data.tc_strict_priority_flags = tc_map;
1410 /* Get all TCs' bandwidth. */
1411 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1412 if (veb->enabled_tc & BIT_ULL(i)) {
1413 /* For rubust, if bandwidth is 0, use 1 instead. */
1414 if (veb->bw_info.bw_ets_share_credits[i])
1415 ets_data.tc_bw_share_credits[i] =
1416 veb->bw_info.bw_ets_share_credits[i];
1418 ets_data.tc_bw_share_credits[i] =
1419 I40E_QOS_BW_WEIGHT_MIN;
1423 if (!veb->strict_prio_tc)
1424 ret = i40e_aq_config_switch_comp_ets(
1425 hw, veb->uplink_seid,
1426 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1429 ret = i40e_aq_config_switch_comp_ets(
1430 hw, veb->uplink_seid,
1431 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1434 ret = i40e_aq_config_switch_comp_ets(
1435 hw, veb->uplink_seid,
1436 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1441 "Failed to set TCs' strict priority mode."
1446 veb->strict_prio_tc = tc_map;
1448 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1450 ret = i40e_aq_start_lldp(hw, NULL);
1453 "Failed to enable DCBx, err(%d).", ret);
1458 "DCBx is enabled again according to strict"
1459 " priority setting.");
1465 #define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
1466 #define I40E_MAX_PROFILE_NUM 16
1469 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1470 uint32_t track_id, uint8_t *profile_info_sec,
1473 struct i40e_profile_section_header *sec = NULL;
1474 struct i40e_profile_info *pinfo;
1476 sec = (struct i40e_profile_section_header *)profile_info_sec;
1478 sec->data_end = sizeof(struct i40e_profile_section_header) +
1479 sizeof(struct i40e_profile_info);
1480 sec->section.type = SECTION_TYPE_INFO;
1481 sec->section.offset = sizeof(struct i40e_profile_section_header);
1482 sec->section.size = sizeof(struct i40e_profile_info);
1483 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1484 sec->section.offset);
1485 pinfo->track_id = track_id;
1486 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1487 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1489 pinfo->op = I40E_DDP_ADD_TRACKID;
1491 pinfo->op = I40E_DDP_REMOVE_TRACKID;
1494 static enum i40e_status_code
1495 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1497 enum i40e_status_code status = I40E_SUCCESS;
1498 struct i40e_profile_section_header *sec;
1500 uint32_t offset = 0;
1503 sec = (struct i40e_profile_section_header *)profile_info_sec;
1504 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1505 sec->section.offset))->track_id;
1507 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1508 track_id, &offset, &info, NULL);
1510 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1511 "offset %d, info %d",
1517 /* Check if the profile info exists */
1519 i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
1521 struct rte_eth_dev *dev = &rte_eth_devices[port];
1522 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524 struct rte_pmd_i40e_profile_list *p_list;
1525 struct rte_pmd_i40e_profile_info *pinfo, *p;
1529 buff = rte_zmalloc("pinfo_list",
1530 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1533 PMD_DRV_LOG(ERR, "failed to allocate memory");
1537 ret = i40e_aq_get_ddp_list(
1539 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1542 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1546 p_list = (struct rte_pmd_i40e_profile_list *)buff;
1547 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1548 sizeof(struct i40e_profile_section_header));
1549 for (i = 0; i < p_list->p_count; i++) {
1550 p = &p_list->p_info[i];
1551 if (pinfo->track_id == p->track_id) {
1552 PMD_DRV_LOG(INFO, "Profile exists.");
/*
 * rte_pmd_i40e_process_ddp_package - write, add or delete a DDP
 * (Dynamic Device Personalization) profile package on the given port.
 *
 * Validates the operation (WR_ADD / WR_ONLY / WR_DEL), the port, and the
 * package buffer size; locates the metadata and i40e profile segments;
 * checks whether the profile is already loaded; then writes the profile
 * (or rolls it back for WR_DEL) and updates the loaded-profiles info list.
 *
 * NOTE(review): this extract is missing intermediate source lines
 * (braces, error returns, some statements); code kept byte-identical.
 */
1563 rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
1565 enum rte_pmd_i40e_package_op op)
1567 struct rte_eth_dev *dev;
1569 struct i40e_package_header *pkg_hdr;
1570 struct i40e_generic_seg_header *profile_seg_hdr;
1571 struct i40e_generic_seg_header *metadata_seg_hdr;
1573 uint8_t *profile_info_sec;
1575 enum i40e_status_code status = I40E_SUCCESS;
/* Only add/write-only/delete operations are supported. */
1577 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
1578 op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
1579 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
1580 PMD_DRV_LOG(ERR, "Operation not supported.");
1584 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1586 dev = &rte_eth_devices[port];
1588 if (!is_i40e_supported(dev))
1591 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Minimum package = header + metadata segment + 2 x u32 offsets. */
1593 if (size < (sizeof(struct i40e_package_header) +
1594 sizeof(struct i40e_metadata_segment) +
1595 sizeof(uint32_t) * 2)) {
1596 PMD_DRV_LOG(ERR, "Buff is invalid.");
1600 pkg_hdr = (struct i40e_package_header *)buff;
1603 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
1607 if (pkg_hdr->segment_count < 2) {
1608 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
/* Refresh driver-side customized (pctype/ptype) info from the package. */
1612 i40e_update_customized_info(dev, buff, size);
1614 /* Find metadata segment */
1615 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1617 if (!metadata_seg_hdr) {
1618 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1621 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1622 if (track_id == I40E_DDP_TRACKID_INVALID) {
1623 PMD_DRV_LOG(ERR, "Invalid track_id");
1627 /* Find profile segment */
1628 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1630 if (!profile_seg_hdr) {
1631 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
/* Scratch section used both for the existence check and, later,
 * for updating the loaded-profiles info list; freed on every path.
 */
1635 profile_info_sec = rte_zmalloc(
1636 "i40e_profile_info",
1637 sizeof(struct i40e_profile_section_header) +
1638 sizeof(struct i40e_profile_info),
1640 if (!profile_info_sec) {
1641 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1645 /* Check if the profile already loaded */
1646 i40e_generate_profile_info_sec(
1647 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1648 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1649 track_id, profile_info_sec,
1650 op == RTE_PMD_I40E_PKG_OP_WR_ADD);
1651 is_exist = i40e_check_profile_info(port, profile_info_sec);
1653 PMD_DRV_LOG(ERR, "Failed to check profile.");
1654 rte_free(profile_info_sec);
/* ADD requires the profile absent; DEL requires it present. */
1658 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1660 PMD_DRV_LOG(ERR, "Profile already exists.");
1661 rte_free(profile_info_sec);
1664 } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1666 PMD_DRV_LOG(ERR, "Profile does not exist.");
1667 rte_free(profile_info_sec);
/* DEL rolls the profile back; ADD/WR_ONLY write it to the NIC. */
1672 if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
1673 status = i40e_rollback_profile(
1675 (struct i40e_profile_segment *)profile_seg_hdr,
1678 PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
1679 rte_free(profile_info_sec);
1683 status = i40e_write_profile(
1685 (struct i40e_profile_segment *)profile_seg_hdr,
1688 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1689 PMD_DRV_LOG(ERR, "Failed to write profile for add.");
1691 PMD_DRV_LOG(ERR, "Failed to write profile.");
1692 rte_free(profile_info_sec);
/* WR_ONLY intentionally skips the loaded-profiles bookkeeping. */
1697 if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
1698 /* Modify loaded profiles info list */
1699 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1701 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
1702 PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
1704 PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
1708 rte_free(profile_info_sec);
1712 /* Get number of TLV records in the section (fixed typo: was "tvl") */
/*
 * Walks the TLV records that follow the section header: the record count
 * is derived from section.size, and each record at index i starts at
 * &sec[1 + i] (i.e. immediately after the header).
 * NOTE(review): the loop body that advances i and accumulates nb_tlv,
 * and the final return, are missing from this extract.
 */
1714 i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
1716 unsigned int i, nb_rec, nb_tlv = 0;
1717 struct i40e_profile_tlv_section_record *tlv;
1722 /* get number of records in the section */
1723 nb_rec = sec->section.size /
1724 sizeof(struct i40e_profile_tlv_section_record);
1725 for (i = 0; i < nb_rec; ) {
1726 tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
/*
 * rte_pmd_i40e_get_ddp_info - extract one piece of information from a
 * DDP package buffer into the caller-supplied info buffer.
 *
 * @pkg_buff/@pkg_size: the raw package image.
 * @info_buff/@info_size: output buffer; its required size depends on @type.
 * @type: which datum to extract (global header, notes, i40e segment
 *        header, device list, protocol/pctype/ptype counts and lists).
 *
 * Returns I40E_SUCCESS on success; error paths log and (in the full
 * source) return a negative status.
 *
 * NOTE(review): this extract omits many original lines (braces, early
 * returns, some assignments); code kept byte-identical.
 */
1733 int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
1734 uint8_t *info_buff, uint32_t info_size,
1735 enum rte_pmd_i40e_package_info type)
1738 struct i40e_package_header *pkg_hdr;
1739 struct i40e_generic_seg_header *i40e_seg_hdr;
1740 struct i40e_generic_seg_header *note_seg_hdr;
1741 struct i40e_generic_seg_header *metadata_seg_hdr;
1744 PMD_DRV_LOG(ERR, "Output info buff is invalid.");
/* Minimum package = header + metadata segment + 2 x u32 offsets. */
1748 if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
1749 sizeof(struct i40e_metadata_segment) +
1750 sizeof(uint32_t) * 2)) {
1751 PMD_DRV_LOG(ERR, "Package buff is invalid.");
1755 pkg_hdr = (struct i40e_package_header *)pkg_buff;
1756 if (pkg_hdr->segment_count < 2) {
1757 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1761 /* Find metadata segment */
1762 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1765 /* Find global notes segment */
1766 note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
1769 /* Find i40e profile segment */
1770 i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
1772 /* get global header info */
1773 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
1774 struct rte_pmd_i40e_profile_info *info =
1775 (struct rte_pmd_i40e_profile_info *)info_buff;
1777 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1778 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1782 if (!metadata_seg_hdr) {
1783 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1787 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1788 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1790 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1793 ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
1794 I40E_DDP_NAME_SIZE);
1795 memcpy(&info->version,
1796 &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
1797 sizeof(struct i40e_ddp_version));
1798 return I40E_SUCCESS;
1801 /* get global note size */
1802 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
1803 if (info_size < sizeof(uint32_t)) {
1804 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1807 if (note_seg_hdr == NULL)
1810 ret_size = note_seg_hdr->size;
1811 *(uint32_t *)info_buff = ret_size;
1812 return I40E_SUCCESS;
1815 /* get global note */
1816 if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
1817 if (note_seg_hdr == NULL)
1819 if (info_size < note_seg_hdr->size) {
1820 PMD_DRV_LOG(ERR, "Information buffer size is too small");
/* NOTE(review): "¬e_seg_hdr" below is a character mis-encoding of
 * "&note_seg_hdr" (HTML entity "&not;" collapsed) — fix file encoding.
 */
1823 memcpy(info_buff, ¬e_seg_hdr[1], note_seg_hdr->size);
1824 return I40E_SUCCESS;
1827 /* get i40e segment header info */
1828 if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
1829 struct rte_pmd_i40e_profile_info *info =
1830 (struct rte_pmd_i40e_profile_info *)info_buff;
1832 if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
1833 PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
1837 if (!metadata_seg_hdr) {
1838 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1842 if (!i40e_seg_hdr) {
1843 PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
1847 memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
1848 info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
1850 ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1853 ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
1854 I40E_DDP_NAME_SIZE);
1855 memcpy(&info->version,
1856 &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
1857 sizeof(struct i40e_ddp_version));
1858 return I40E_SUCCESS;
1861 /* get number of devices */
1862 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
1863 if (info_size < sizeof(uint32_t)) {
1864 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1867 *(uint32_t *)info_buff =
1868 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1869 return I40E_SUCCESS;
1872 /* get list of devices */
1873 if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
1876 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
1877 if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
1878 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1882 ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
1883 sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
1884 return I40E_SUCCESS;
1887 /* get number of protocols */
1888 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
1889 struct i40e_profile_section_header *proto;
1891 if (info_size < sizeof(uint32_t)) {
1892 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1895 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1896 (struct i40e_profile_segment *)i40e_seg_hdr);
1897 *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
1898 return I40E_SUCCESS;
1901 /* get list of protocols */
1902 if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
1903 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1904 struct rte_pmd_i40e_proto_info *pinfo;
1905 struct i40e_profile_section_header *proto;
1906 struct i40e_profile_tlv_section_record *tlv;
/* Pre-mark every output slot unused so short lists are well-defined. */
1908 pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
1909 nb_proto_info = info_size /
1910 sizeof(struct rte_pmd_i40e_proto_info);
1911 for (i = 0; i < nb_proto_info; i++) {
1912 pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
1913 memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
1915 proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
1916 (struct i40e_profile_segment *)i40e_seg_hdr);
1917 nb_tlv = i40e_get_tlv_section_size(proto);
1919 return I40E_SUCCESS;
1920 if (nb_proto_info < nb_tlv) {
1921 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1924 /* get number of records in the section */
1925 nb_rec = proto->section.size /
1926 sizeof(struct i40e_profile_tlv_section_record);
1927 tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
1928 for (i = j = 0; i < nb_rec; j++) {
1929 pinfo[j].proto_id = tlv->data[0];
/* NOTE(review): strncpy may leave pinfo[j].name unterminated if the
 * source name fills I40E_DDP_NAME_SIZE; destination was pre-zeroed
 * above, which mitigates this only when the source is shorter.
 */
1930 strncpy(pinfo[j].name, (const char *)&tlv->data[1],
1931 I40E_DDP_NAME_SIZE);
/* tlv->len is in units of records; advance by that many entries. */
1933 tlv = &tlv[tlv->len];
1935 return I40E_SUCCESS;
1938 /* get number of packet classification types */
1939 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
1940 struct i40e_profile_section_header *pctype;
1942 if (info_size < sizeof(uint32_t)) {
1943 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1946 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1947 (struct i40e_profile_segment *)i40e_seg_hdr);
1948 *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
1949 return I40E_SUCCESS;
1952 /* get list of packet classification types */
1953 if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
1954 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
1955 struct rte_pmd_i40e_ptype_info *pinfo;
1956 struct i40e_profile_section_header *pctype;
1957 struct i40e_profile_tlv_section_record *tlv;
1959 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
1960 nb_proto_info = info_size /
1961 sizeof(struct rte_pmd_i40e_ptype_info);
/* Fill output with the UNUSED marker byte before copying records. */
1962 for (i = 0; i < nb_proto_info; i++)
1963 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
1964 sizeof(struct rte_pmd_i40e_ptype_info));
1965 pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
1966 (struct i40e_profile_segment *)i40e_seg_hdr);
1967 nb_tlv = i40e_get_tlv_section_size(pctype);
1969 return I40E_SUCCESS;
1970 if (nb_proto_info < nb_tlv) {
1971 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1975 /* get number of records in the section */
1976 nb_rec = pctype->section.size /
1977 sizeof(struct i40e_profile_tlv_section_record);
1978 tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
1979 for (i = j = 0; i < nb_rec; j++) {
1980 memcpy(&pinfo[j], tlv->data,
1981 sizeof(struct rte_pmd_i40e_ptype_info));
1983 tlv = &tlv[tlv->len];
1985 return I40E_SUCCESS;
1988 /* get number of packet types */
1989 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
1990 struct i40e_profile_section_header *ptype;
1992 if (info_size < sizeof(uint32_t)) {
1993 PMD_DRV_LOG(ERR, "Invalid information buffer size");
1996 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
1997 (struct i40e_profile_segment *)i40e_seg_hdr);
1998 *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
1999 return I40E_SUCCESS;
2002 /* get list of packet types */
2003 if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
2004 uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
2005 struct rte_pmd_i40e_ptype_info *pinfo;
2006 struct i40e_profile_section_header *ptype;
2007 struct i40e_profile_tlv_section_record *tlv;
2009 pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
2010 nb_proto_info = info_size /
2011 sizeof(struct rte_pmd_i40e_ptype_info);
2012 for (i = 0; i < nb_proto_info; i++)
2013 memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
2014 sizeof(struct rte_pmd_i40e_ptype_info));
2015 ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
2016 (struct i40e_profile_segment *)i40e_seg_hdr);
2017 nb_tlv = i40e_get_tlv_section_size(ptype);
2019 return I40E_SUCCESS;
2020 if (nb_proto_info < nb_tlv) {
2021 PMD_DRV_LOG(ERR, "Invalid information buffer size");
2024 /* get number of records in the section */
2025 nb_rec = ptype->section.size /
2026 sizeof(struct i40e_profile_tlv_section_record);
2027 for (i = j = 0; i < nb_rec; j++) {
2028 tlv = (struct i40e_profile_tlv_section_record *)
2030 memcpy(&pinfo[j], tlv->data,
2031 sizeof(struct rte_pmd_i40e_ptype_info));
2034 return I40E_SUCCESS;
/* Fallthrough: none of the known info types matched. */
2037 PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
/*
 * rte_pmd_i40e_get_ddp_list - fetch the list of DDP profiles currently
 * loaded on the NIC via the admin queue (i40e_aq_get_ddp_list).
 *
 * @buff must hold at least I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM
 * + 4 bytes (the +4 presumably covers a leading count field — TODO
 * confirm against the buffer layout consumed by callers).
 * NOTE(review): extract omits the error-return lines; code verbatim.
 */
2042 rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
2044 struct rte_eth_dev *dev;
2046 enum i40e_status_code status = I40E_SUCCESS;
2048 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2050 dev = &rte_eth_devices[port];
2052 if (!is_i40e_supported(dev))
2055 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
2058 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2060 status = i40e_aq_get_ddp_list(hw, (void *)buff,
2066 static int check_invalid_pkt_type(uint32_t pkt_type)
2068 uint32_t l2, l3, l4, tnl, il2, il3, il4;
2070 l2 = pkt_type & RTE_PTYPE_L2_MASK;
2071 l3 = pkt_type & RTE_PTYPE_L3_MASK;
2072 l4 = pkt_type & RTE_PTYPE_L4_MASK;
2073 tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
2074 il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
2075 il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
2076 il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
2079 l2 != RTE_PTYPE_L2_ETHER &&
2080 l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
2081 l2 != RTE_PTYPE_L2_ETHER_ARP &&
2082 l2 != RTE_PTYPE_L2_ETHER_LLDP &&
2083 l2 != RTE_PTYPE_L2_ETHER_NSH &&
2084 l2 != RTE_PTYPE_L2_ETHER_VLAN &&
2085 l2 != RTE_PTYPE_L2_ETHER_QINQ)
2089 l3 != RTE_PTYPE_L3_IPV4 &&
2090 l3 != RTE_PTYPE_L3_IPV4_EXT &&
2091 l3 != RTE_PTYPE_L3_IPV6 &&
2092 l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
2093 l3 != RTE_PTYPE_L3_IPV6_EXT &&
2094 l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
2098 l4 != RTE_PTYPE_L4_TCP &&
2099 l4 != RTE_PTYPE_L4_UDP &&
2100 l4 != RTE_PTYPE_L4_FRAG &&
2101 l4 != RTE_PTYPE_L4_SCTP &&
2102 l4 != RTE_PTYPE_L4_ICMP &&
2103 l4 != RTE_PTYPE_L4_NONFRAG)
2107 tnl != RTE_PTYPE_TUNNEL_IP &&
2108 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2109 tnl != RTE_PTYPE_TUNNEL_VXLAN &&
2110 tnl != RTE_PTYPE_TUNNEL_NVGRE &&
2111 tnl != RTE_PTYPE_TUNNEL_GENEVE &&
2112 tnl != RTE_PTYPE_TUNNEL_GRENAT &&
2113 tnl != RTE_PTYPE_TUNNEL_GTPC &&
2114 tnl != RTE_PTYPE_TUNNEL_GTPU)
2118 il2 != RTE_PTYPE_INNER_L2_ETHER &&
2119 il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
2120 il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
2124 il3 != RTE_PTYPE_INNER_L3_IPV4 &&
2125 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
2126 il3 != RTE_PTYPE_INNER_L3_IPV6 &&
2127 il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
2128 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
2129 il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
2133 il4 != RTE_PTYPE_INNER_L4_TCP &&
2134 il4 != RTE_PTYPE_INNER_L4_UDP &&
2135 il4 != RTE_PTYPE_INNER_L4_FRAG &&
2136 il4 != RTE_PTYPE_INNER_L4_SCTP &&
2137 il4 != RTE_PTYPE_INNER_L4_ICMP &&
2138 il4 != RTE_PTYPE_INNER_L4_NONFRAG)
/*
 * check_invalid_ptype_mapping - sanity-check a hw->sw ptype mapping table.
 *
 * For each entry: the hardware ptype must be < I40E_MAX_PKT_TYPE;
 * RTE_PTYPE_UNKNOWN and user-defined sw ptypes are accepted without
 * further checks; every other sw ptype must pass check_invalid_pkt_type().
 * NOTE(review): extract omits the count parameter line, return
 * statements and braces; code kept verbatim.
 */
2144 static int check_invalid_ptype_mapping(
2145 struct rte_pmd_i40e_ptype_mapping *mapping_table,
2150 for (i = 0; i < count; i++) {
2151 uint16_t ptype = mapping_table[i].hw_ptype;
2152 uint32_t pkt_type = mapping_table[i].sw_ptype;
2154 if (ptype >= I40E_MAX_PKT_TYPE)
2157 if (pkt_type == RTE_PTYPE_UNKNOWN)
2160 if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
2163 if (check_invalid_pkt_type(pkt_type))
/*
 * rte_pmd_i40e_ptype_mapping_update - install a hw->sw packet-type
 * mapping table in the adapter's ptype_tbl.
 *
 * Validates the port and the mapping entries, optionally (in the full
 * source, controlled by a flag missing from this extract) resets every
 * slot to RTE_PTYPE_UNKNOWN, then writes the supplied entries.
 * NOTE(review): extract omits parameters and returns; code verbatim.
 */
2171 rte_pmd_i40e_ptype_mapping_update(
2173 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2177 struct rte_eth_dev *dev;
2178 struct i40e_adapter *ad;
2181 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2183 dev = &rte_eth_devices[port];
2185 if (!is_i40e_supported(dev))
2188 if (count > I40E_MAX_PKT_TYPE)
2191 if (check_invalid_ptype_mapping(mapping_items, count))
2194 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Clear the whole table before applying the new entries. */
2197 for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
2198 ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
2201 for (i = 0; i < count; i++)
2202 ad->ptype_tbl[mapping_items[i].hw_ptype]
2203 = mapping_items[i].sw_ptype;
/*
 * rte_pmd_i40e_ptype_mapping_reset - restore the driver's default
 * hw->sw packet-type table for the given port.
 */
2208 int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
2210 struct rte_eth_dev *dev;
2212 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2214 dev = &rte_eth_devices[port];
2216 if (!is_i40e_supported(dev))
2219 i40e_set_default_ptype_table(dev);
/*
 * rte_pmd_i40e_ptype_mapping_get - read back the hw->sw packet-type
 * table into @mapping_items; with valid_only set, entries equal to
 * RTE_PTYPE_UNKNOWN are skipped.
 * NOTE(review): extract omits the size/count parameters and the
 * n-increment/return lines; code kept verbatim.
 */
2224 int rte_pmd_i40e_ptype_mapping_get(
2226 struct rte_pmd_i40e_ptype_mapping *mapping_items,
2231 struct rte_eth_dev *dev;
2232 struct i40e_adapter *ad;
2236 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2238 dev = &rte_eth_devices[port];
2240 if (!is_i40e_supported(dev))
2243 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2245 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
2248 if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
2250 mapping_items[n].hw_ptype = i;
2251 mapping_items[n].sw_ptype = ad->ptype_tbl[i];
/*
 * rte_pmd_i40e_ptype_mapping_replace - replace occurrences of @target
 * in the sw ptype table with @pkt_type.
 *
 * With mask set: any entry whose bits are a non-empty subset of @target
 * is replaced. Without mask: only entries exactly equal to @target.
 * NOTE(review): extract omits the target/mask/pkt_type parameter lines
 * and some braces; code kept verbatim.
 */
2259 int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
2264 struct rte_eth_dev *dev;
2265 struct i40e_adapter *ad;
2268 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2270 dev = &rte_eth_devices[port];
2272 if (!is_i40e_supported(dev))
/* For exact (non-mask) replacement, @target itself must be valid. */
2275 if (!mask && check_invalid_pkt_type(target))
2278 if (check_invalid_pkt_type(pkt_type))
2281 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2283 for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
/* Masked match: entry bits all within target, and overlapping it. */
2285 if ((target | ad->ptype_tbl[i]) == target &&
2286 (target & ad->ptype_tbl[i]))
2287 ad->ptype_tbl[i] = pkt_type;
2289 if (ad->ptype_tbl[i] == target)
2290 ad->ptype_tbl[i] = pkt_type;
/*
 * rte_pmd_i40e_add_vf_mac_addr - add a perfect-match MAC filter to the
 * VSI of VF @vf_id on @port.
 *
 * Validates the MAC address, the port, the VF index, and the VF's VSI
 * before installing the filter with i40e_vsi_add_mac().
 * NOTE(review): extract omits some declarations/returns; code verbatim.
 */
2298 rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id,
2299 struct ether_addr *mac_addr)
2301 struct rte_eth_dev *dev;
2302 struct i40e_pf_vf *vf;
2303 struct i40e_vsi *vsi;
2305 struct i40e_mac_filter_info mac_filter;
2308 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
2311 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2313 dev = &rte_eth_devices[port];
2315 if (!is_i40e_supported(dev))
2318 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2320 if (vf_id >= pf->vf_num || !pf->vfs)
2323 vf = &pf->vfs[vf_id];
2326 PMD_DRV_LOG(ERR, "Invalid VSI.");
2330 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
2331 ether_addr_copy(mac_addr, &mac_filter.mac_addr);
2332 ret = i40e_vsi_add_mac(vsi, &mac_filter);
2333 if (ret != I40E_SUCCESS) {
2334 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
/*
 * rte_pmd_i40e_flow_type_mapping_reset - restore the default
 * flow-type -> pctype mapping table for the given port.
 */
2341 int rte_pmd_i40e_flow_type_mapping_reset(uint8_t port)
2343 struct rte_eth_dev *dev;
2345 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2347 dev = &rte_eth_devices[port];
2349 if (!is_i40e_supported(dev))
2352 i40e_set_default_pctype_table(dev);
/*
 * rte_pmd_i40e_flow_type_mapping_get - copy the full flow-type ->
 * pctype-bitmap table (I40E_FLOW_TYPE_MAX entries) into @mapping_items.
 */
2357 int rte_pmd_i40e_flow_type_mapping_get(
2359 struct rte_pmd_i40e_flow_type_mapping *mapping_items)
2361 struct rte_eth_dev *dev;
2362 struct i40e_adapter *ad;
2365 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2367 dev = &rte_eth_devices[port];
2369 if (!is_i40e_supported(dev))
2372 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2374 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
2375 mapping_items[i].flow_type = i;
2376 mapping_items[i].pctype = ad->pctypes_tbl[i];
/*
 * rte_pmd_i40e_flow_type_mapping_update - install new flow-type ->
 * pctype-bitmap mappings and recompute the aggregate masks.
 *
 * Rejects out-of-range or unknown flow types and pctypes containing the
 * invalid bit; optionally (full source has a condition missing here)
 * clears the table first; finally rebuilds flow_types_mask and
 * pctypes_mask from the updated table.
 * NOTE(review): extract omits parameters, braces and returns; verbatim.
 */
2383 rte_pmd_i40e_flow_type_mapping_update(
2385 struct rte_pmd_i40e_flow_type_mapping *mapping_items,
2389 struct rte_eth_dev *dev;
2390 struct i40e_adapter *ad;
2393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2395 dev = &rte_eth_devices[port];
2397 if (!is_i40e_supported(dev))
2400 if (count > I40E_FLOW_TYPE_MAX)
2403 for (i = 0; i < count; i++)
2404 if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
2405 mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
2406 (mapping_items[i].pctype &
2407 (1ULL << I40E_FILTER_PCTYPE_INVALID)))
2410 ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Reset table and flow-type mask before applying the new mappings. */
2413 for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
2414 ad->pctypes_tbl[i] = 0ULL;
2415 ad->flow_types_mask = 0ULL;
2418 for (i = 0; i < count; i++) {
2419 ad->pctypes_tbl[mapping_items[i].flow_type] =
2420 mapping_items[i].pctype;
/* A zero pctype disables the flow type; clear its mask bit. */
2421 if (mapping_items[i].pctype)
2422 ad->flow_types_mask |=
2423 (1ULL << mapping_items[i].flow_type);
2425 ad->flow_types_mask &=
2426 ~(1ULL << mapping_items[i].flow_type);
/* Recompute the union of all enabled pctypes. */
2429 for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
2430 ad->pctypes_mask |= ad->pctypes_tbl[i];
/*
 * rte_pmd_i40e_query_vfid_by_mac - linear search of the PF's VF array
 * for a VF whose MAC address equals @vf_mac; in the full source the
 * matching vf_id is returned (return line missing from this extract).
 */
2436 rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
2438 struct rte_eth_dev *dev;
2439 struct ether_addr *mac;
2442 struct i40e_pf_vf *vf;
2445 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
2446 dev = &rte_eth_devices[port];
2448 if (!is_i40e_supported(dev))
2451 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2452 vf_num = pf->vf_num;
2454 for (vf_id = 0; vf_id < vf_num; vf_id++) {
2455 vf = &pf->vfs[vf_id];
2456 mac = &vf->mac_addr;
2458 if (is_same_ether_addr(mac, vf_mac))
/*
 * i40e_vsi_update_queue_region_mapping - push the configured queue
 * regions into the main VSI's TC/queue mapping via an admin-queue
 * update, then mirror the accepted mapping into the local VSI info.
 *
 * Each region becomes one TC entry: queue_start_index as the offset and
 * log2(queue_num) as the "number of queues" field (queue_num must be a
 * power of two, enforced by i40e_queue_region_set_region).
 * NOTE(review): extract omits some statements and braces; code verbatim.
 */
2466 i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
2470 struct i40e_vsi *vsi = pf->main_vsi;
2471 uint16_t queue_offset, bsf, tc_index;
2472 struct i40e_vsi_context ctxt;
2473 struct i40e_aqc_vsi_properties_data *vsi_info;
2474 struct i40e_queue_regions *region_info =
2476 int32_t ret = -EINVAL;
2478 if (!region_info->queue_region_number) {
2479 PMD_INIT_LOG(ERR, "there is no that region id been set before");
2483 memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
2485 /* Update Queue Pairs Mapping for currently enabled UPs */
2486 ctxt.seid = vsi->seid;
2487 ctxt.pf_num = hw->pf_id;
2489 ctxt.uplink_seid = vsi->uplink_seid;
2490 ctxt.info = vsi->info;
2491 vsi_info = &ctxt.info;
2493 memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
2494 memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
2496 /* Configure queue region and queue mapping parameters,
2497 * for enabled queue region, allocate queues to this region.
2500 for (i = 0; i < region_info->queue_region_number; i++) {
2501 tc_index = region_info->region[i].region_id;
/* bsf = log2(queue count); encoded in the TC mapping word. */
2502 bsf = rte_bsf32(region_info->region[i].queue_num);
2503 queue_offset = region_info->region[i].queue_start_index;
2504 vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
2505 (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
2506 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
2509 /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */
2510 vsi_info->mapping_flags |=
2511 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2512 vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
2513 vsi_info->valid_sections |=
2514 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
2516 /* Update the VSI after updating the VSI queue-mapping information */
2517 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2519 PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
2520 hw->aq.asq_last_status);
2523 /* update the local VSI info with updated queue map */
2524 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
2525 sizeof(vsi->info.tc_mapping));
2526 rte_memcpy(&vsi->info.queue_mapping,
2527 &ctxt.info.queue_mapping,
2528 sizeof(vsi->info.queue_mapping));
2529 vsi->info.mapping_flags = ctxt.info.mapping_flags;
2530 vsi->info.valid_sections = 0;
/*
 * i40e_queue_region_set_region - record a new queue region in the PF's
 * software queue-region table (not committed to HW until FLUSH_ON).
 *
 * Constraints enforced: queue_num is a power of two <= 64, region_id
 * <= I40E_REGION_MAX_INDEX, and the queue range fits inside the main
 * VSI; duplicate region_ids are rejected.
 * NOTE(review): extract omits braces/returns; code kept verbatim.
 */
2537 i40e_queue_region_set_region(struct i40e_pf *pf,
2538 struct rte_pmd_i40e_queue_region_conf *conf_ptr)
2541 struct i40e_vsi *main_vsi = pf->main_vsi;
2542 struct i40e_queue_regions *info = &pf->queue_region;
2543 int32_t ret = -EINVAL;
2545 if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
2546 conf_ptr->queue_num <= 64)) {
2547 PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
2548 "total number of queues do not exceed the VSI allocation");
2552 if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
2553 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2557 if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
2558 > main_vsi->nb_used_qps) {
2559 PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
/* Reject a region_id that was already configured. */
2563 for (i = 0; i < info->queue_region_number; i++)
2564 if (conf_ptr->region_id == info->region[i].region_id)
2567 if (i == info->queue_region_number &&
2568 i <= I40E_REGION_MAX_INDEX) {
2569 info->region[i].region_id = conf_ptr->region_id;
2570 info->region[i].queue_num = conf_ptr->queue_num;
2571 info->region[i].queue_start_index =
2572 conf_ptr->queue_start_index;
2573 info->queue_region_number++;
2575 PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
/*
 * i40e_queue_region_set_flowtype - associate a hardware flow type
 * (pctype) with an already-configured queue region in the software
 * table (committed to HW later by the flush operation).
 *
 * Rejects out-of-range region_id/hw_flowtype, unknown region_ids, and
 * flow types already bound to any region.
 * NOTE(review): extract omits braces/returns and the assignment of
 * region_index; code kept verbatim.
 */
2583 i40e_queue_region_set_flowtype(struct i40e_pf *pf,
2584 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2586 int32_t ret = -EINVAL;
2587 struct i40e_queue_regions *info = &pf->queue_region;
2589 uint16_t region_index, flowtype_index;
2591 /* For the pctype or hardware flowtype of packet,
2592 * the specific index for each type has been defined
2593 * in file i40e_type.h as enum i40e_filter_pctype.
2596 if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
2597 PMD_DRV_LOG(ERR, "the queue region max index is 7")
2601 if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
2602 PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
/* The region must have been created via set_region first. */
2607 for (i = 0; i < info->queue_region_number; i++)
2608 if (rss_region_conf->region_id == info->region[i].region_id)
2611 if (i == info->queue_region_number) {
2612 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* A flow type may belong to at most one region. */
2618 for (i = 0; i < info->queue_region_number; i++) {
2619 for (j = 0; j < info->region[i].flowtype_num; j++) {
2620 if (rss_region_conf->hw_flowtype ==
2621 info->region[i].hw_flowtype[j]) {
2622 PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
2628 flowtype_index = info->region[region_index].flowtype_num;
2629 info->region[region_index].hw_flowtype[flowtype_index] =
2630 rss_region_conf->hw_flowtype;
2631 info->region[region_index].flowtype_num++;
/*
 * i40e_queue_region_pf_flowtype_conf - program the PFQF_HREGION
 * registers from the software queue-region/flow-type table.
 *
 * Each PFQF_HREGION register holds 8 flow-type slots; for flow type f,
 * the register index is f >> 3 and the slot within it is f & 0x7. For
 * every configured (region, flowtype) pair this sets the slot's region
 * id and its override-enable bit, using a read-modify-write of the
 * register via i40e_read_rx_ctl()/i40e_write_rx_ctl().
 * NOTE(review): extract omits some closing braces and the final write
 * argument line; code kept verbatim.
 */
2637 i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
2640 uint8_t hw_flowtype;
2641 uint32_t pfqf_hregion;
2642 uint16_t i, j, index;
2643 struct i40e_queue_regions *info = &pf->queue_region;
2645 /* For the pctype or hardware flowtype of packet,
2646 * the specific index for each type has been defined
2647 * in file i40e_type.h as enum i40e_filter_pctype.
2650 for (i = 0; i < info->queue_region_number; i++) {
2651 for (j = 0; j < info->region[i].flowtype_num; j++) {
2652 hw_flowtype = info->region[i].hw_flowtype[j];
/* 8 flow-type slots per register: pick register, then slot. */
2653 index = hw_flowtype >> 3;
2655 i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
2657 if ((hw_flowtype & 0x7) == 0) {
2658 pfqf_hregion |= info->region[i].region_id <<
2659 I40E_PFQF_HREGION_REGION_0_SHIFT;
2660 pfqf_hregion |= 1 <<
2661 I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
2662 } else if ((hw_flowtype & 0x7) == 1) {
2663 pfqf_hregion |= info->region[i].region_id <<
2664 I40E_PFQF_HREGION_REGION_1_SHIFT;
2665 pfqf_hregion |= 1 <<
2666 I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
2667 } else if ((hw_flowtype & 0x7) == 2) {
2668 pfqf_hregion |= info->region[i].region_id <<
2669 I40E_PFQF_HREGION_REGION_2_SHIFT;
2670 pfqf_hregion |= 1 <<
2671 I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
2672 } else if ((hw_flowtype & 0x7) == 3) {
2673 pfqf_hregion |= info->region[i].region_id <<
2674 I40E_PFQF_HREGION_REGION_3_SHIFT;
2675 pfqf_hregion |= 1 <<
2676 I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
2677 } else if ((hw_flowtype & 0x7) == 4) {
2678 pfqf_hregion |= info->region[i].region_id <<
2679 I40E_PFQF_HREGION_REGION_4_SHIFT;
2680 pfqf_hregion |= 1 <<
2681 I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
2682 } else if ((hw_flowtype & 0x7) == 5) {
2683 pfqf_hregion |= info->region[i].region_id <<
2684 I40E_PFQF_HREGION_REGION_5_SHIFT;
2685 pfqf_hregion |= 1 <<
2686 I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
2687 } else if ((hw_flowtype & 0x7) == 6) {
2688 pfqf_hregion |= info->region[i].region_id <<
2689 I40E_PFQF_HREGION_REGION_6_SHIFT;
2690 pfqf_hregion |= 1 <<
2691 I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
/* Final else: slot 7. */
2693 pfqf_hregion |= info->region[i].region_id <<
2694 I40E_PFQF_HREGION_REGION_7_SHIFT;
2695 pfqf_hregion |= 1 <<
2696 I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
2699 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
/*
 * i40e_queue_region_set_user_priority - bind an 802.1p user priority to
 * an already-configured queue region in the software table.
 *
 * Rejects out-of-range priority/region_id, unknown region_ids, and
 * priorities already bound to any region.
 * NOTE(review): extract omits braces/returns and the assignment of
 * region_index; code kept verbatim.
 */
2706 i40e_queue_region_set_user_priority(struct i40e_pf *pf,
2707 struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
2709 struct i40e_queue_regions *info = &pf->queue_region;
2710 int32_t ret = -EINVAL;
2711 uint16_t i, j, region_index;
2713 if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
2714 PMD_DRV_LOG(ERR, "the queue region max index is 7");
2718 if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
2719 PMD_DRV_LOG(ERR, "the region_id max index is 7");
/* The region must have been created via set_region first. */
2723 for (i = 0; i < info->queue_region_number; i++)
2724 if (rss_region_conf->region_id == info->region[i].region_id)
2727 if (i == info->queue_region_number) {
2728 PMD_DRV_LOG(ERR, "that region id has not been set before");
/* A user priority may belong to at most one region. */
2735 for (i = 0; i < info->queue_region_number; i++) {
2736 for (j = 0; j < info->region[i].user_priority_num; j++) {
2737 if (info->region[i].user_priority[j] ==
2738 rss_region_conf->user_priority) {
2739 PMD_DRV_LOG(ERR, "that user priority has been set before");
2745 j = info->region[region_index].user_priority_num;
2746 info->region[region_index].user_priority[j] =
2747 rss_region_conf->user_priority;
2748 info->region[region_index].user_priority_num++;
/*
 * i40e_queue_region_dcb_configure - build a DCBX configuration that
 * realizes the queue regions as traffic classes and push it to the HW.
 *
 * Bandwidth is split evenly across regions (remainder distributed one
 * percent at a time so the total is exactly 100), ETS is used for every
 * TC, user priorities are mapped to their region ids, and a single
 * default App entry is added because FW requires at least one.
 * NOTE(review): extract omits a few lines (assignment target at 2790,
 * braces, returns); code kept verbatim.
 */
2754 i40e_queue_region_dcb_configure(struct i40e_hw *hw,
2757 struct i40e_dcbx_config dcb_cfg_local;
2758 struct i40e_dcbx_config *dcb_cfg;
2759 struct i40e_queue_regions *info = &pf->queue_region;
2760 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
2761 int32_t ret = -EINVAL;
2762 uint16_t i, j, prio_index, region_index;
2763 uint8_t tc_map, tc_bw, bw_lf;
2765 if (!info->queue_region_number) {
2766 PMD_DRV_LOG(ERR, "No queue region been set before");
2770 dcb_cfg = &dcb_cfg_local;
2771 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
2773 /* assume each tc has the same bw */
2774 tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
2775 for (i = 0; i < info->queue_region_number; i++)
2776 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
2777 /* to ensure the sum of tcbw is equal to 100 */
2778 bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
2779 for (i = 0; i < bw_lf; i++)
2780 dcb_cfg->etscfg.tcbwtable[i]++;
2782 /* assume each tc has the same Transmission Selection Algorithm */
2783 for (i = 0; i < info->queue_region_number; i++)
2784 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
/* Map each bound user priority to its region's TC index. */
2786 for (i = 0; i < info->queue_region_number; i++) {
2787 for (j = 0; j < info->region[i].user_priority_num; j++) {
2788 prio_index = info->region[i].user_priority[j];
2789 region_index = info->region[i].region_id;
2790 dcb_cfg->etscfg.prioritytable[prio_index] =
2795 /* FW needs one App to configure HW */
2796 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
2797 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
2798 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
2799 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
/* One enabled-TC bit per configured region. */
2801 tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
2803 dcb_cfg->pfc.willing = 0;
2804 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
2805 dcb_cfg->pfc.pfcenable = tc_map;
2807 /* Copy the new config to the current config */
2808 *old_cfg = *dcb_cfg;
2809 old_cfg->etsrec = old_cfg->etscfg;
2810 ret = i40e_set_dcb_config(hw);
2813 PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
2814 i40e_stat_str(hw, ret),
2815 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * i40e_flush_queue_region_all_conf - commit (@on != 0) or revert
 * (@on == 0) all software queue-region configuration to the hardware.
 *
 * Commit path: program flow-type registers, update the VSI queue
 * mapping, then apply the DCB config. Revert path: collapse to a single
 * default 64-queue region, restore default DCB, and re-initialize the
 * software queue-region state.
 * NOTE(review): extract omits the on/off branch headers, braces and
 * returns; code kept verbatim.
 */
2823 i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
2824 struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
2826 int32_t ret = -EINVAL;
2827 struct i40e_queue_regions *info = &pf->queue_region;
2830 i40e_queue_region_pf_flowtype_conf(hw, pf);
2832 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2833 if (ret != I40E_SUCCESS) {
2834 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2838 ret = i40e_queue_region_dcb_configure(hw, pf);
2839 if (ret != I40E_SUCCESS) {
2840 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
/* Revert: single default region spanning queues 0-63. */
2847 info->queue_region_number = 1;
2848 info->region[0].queue_num = 64;
2849 info->region[0].queue_start_index = 0;
2851 ret = i40e_vsi_update_queue_region_mapping(hw, pf);
2852 if (ret != I40E_SUCCESS)
2853 PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
2855 ret = i40e_dcb_init_configure(dev, TRUE);
2856 if (ret != I40E_SUCCESS) {
2857 PMD_DRV_LOG(INFO, "Failed to flush dcb.");
2858 pf->flags &= ~I40E_FLAG_DCB;
2861 i40e_init_queue_region_conf(dev);
/*
 * i40e_queue_region_pf_check_rss - read the 64-bit RSS hash-enable
 * bitmap (PFQF_HENA low/high registers); in the full source the result
 * is used to decide whether RSS is enabled on the PF (tail of the
 * function is missing from this extract).
 */
2867 i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
2869 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2872 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
2873 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
/*
 * i40e_queue_region_get_all_info - copy the PF's entire software
 * queue-region state into the caller-supplied structure.
 */
2882 i40e_queue_region_get_all_info(struct i40e_pf *pf,
2883 struct i40e_queue_regions *regions_ptr)
2885 struct i40e_queue_regions *info = &pf->queue_region;
2887 rte_memcpy(regions_ptr, info,
2888 sizeof(struct i40e_queue_regions));
2893 int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
2894 enum rte_pmd_i40e_queue_region_op op_type, void *arg)
2896 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2897 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2898 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2901 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2903 if (!is_i40e_supported(dev))
2906 if (!(!i40e_queue_region_pf_check_rss(pf)))
2909 /* This queue region feature only support pf by now. It should
2910 * be called after dev_start, and will be clear after dev_stop.
2911 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON"
2912 * is just an enable function which server for other configuration,
2913 * it is for all configuration about queue region from up layer,
2914 * at first will only keep in DPDK softwarestored in driver,
2915 * only after "FLUSH_ON", it commit all configuration to HW.
2916 * Because PMD had to set hardware configuration at a time, so
2917 * it will record all up layer command at first.
2918 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is
2919 * just clean all configuration about queue region just now,
2920 * and restore all to DPDK i40e driver default
2921 * config when start up.
2925 case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
2926 ret = i40e_queue_region_set_region(pf,
2927 (struct rte_pmd_i40e_queue_region_conf *)arg);
2929 case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
2930 ret = i40e_queue_region_set_flowtype(pf,
2931 (struct rte_pmd_i40e_queue_region_conf *)arg);
2933 case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
2934 ret = i40e_queue_region_set_user_priority(pf,
2935 (struct rte_pmd_i40e_queue_region_conf *)arg);
2937 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
2938 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
2940 case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
2941 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
2943 case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
2944 ret = i40e_queue_region_get_all_info(pf,
2945 (struct i40e_queue_regions *)arg);
2948 PMD_DRV_LOG(WARNING, "op type (%d) not supported",
2953 I40E_WRITE_FLUSH(hw);