4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_malloc.h>
35 #include <rte_tailq.h>
37 #include "base/i40e_prototype.h"
38 #include "i40e_ethdev.h"
40 #include "rte_pmd_i40e.h"
/*
 * QoS limits used by the rte_pmd_i40e_set_vf_*_bw / tc_bw APIs below.
 * Bandwidth values are in Mbps; weights are ETS share credits.
 */
42 /* The max bandwidth of i40e is 40Gbps. */
43 #define I40E_QOS_BW_MAX 40000
44 /* The bandwidth should be the multiple of 50Mbps. */
45 #define I40E_QOS_BW_GRANULARITY 50
46 /* The min bandwidth weight is 1. */
47 #define I40E_QOS_BW_WEIGHT_MIN 1
48 /* The max bandwidth weight is 127. */
49 #define I40E_QOS_BW_WEIGHT_MAX 127
/*
 * Notify ("ping") VF @vf on @port of the current link status.
 * Validates the port ID, i40e device support, and the VF index
 * before forwarding to i40e_notify_vf_link_status().
 *
 * NOTE(review): original line numbers are non-contiguous throughout this
 * extraction — return type, braces and return statements are missing here
 * and in the functions below; do not treat any block as complete code.
 */
52 rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
54 struct rte_eth_dev *dev;
57 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
59 dev = &rte_eth_devices[port];
61 if (!is_i40e_supported(dev))
64 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
/* Reject out-of-range VF index or an uninitialized VF table. */
66 if (vf >= pf->vf_num || !pf->vfs) {
67 PMD_DRV_LOG(ERR, "Invalid argument.");
71 i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
/*
 * Enable/disable MAC anti-spoof checking for VF @vf_id on @port.
 * Short-circuits (returns 0) when the VSI security section already
 * reflects the requested state; otherwise toggles
 * I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK and pushes the updated VSI
 * context to firmware via i40e_aq_update_vsi_params().
 */
77 rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
79 struct rte_eth_dev *dev;
83 struct i40e_vsi_context ctxt;
86 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
88 dev = &rte_eth_devices[port];
90 if (!is_i40e_supported(dev))
93 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
95 if (vf_id >= pf->vf_num || !pf->vfs) {
96 PMD_DRV_LOG(ERR, "Invalid argument.");
100 vsi = pf->vfs[vf_id].vsi;
102 PMD_DRV_LOG(ERR, "Invalid VSI.");
106 /* Check if it has been already on or off */
107 if (vsi->info.valid_sections &
108 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
110 if ((vsi->info.sec_flags &
111 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
112 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
113 return 0; /* already on */
115 if ((vsi->info.sec_flags &
116 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
117 return 0; /* already off */
/* Mark only the security section valid so the AQ update touches nothing else. */
121 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
123 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
125 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
127 memset(&ctxt, 0, sizeof(ctxt));
128 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
129 ctxt.seid = vsi->seid;
131 hw = I40E_VSI_TO_HW(vsi);
132 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
133 if (ret != I40E_SUCCESS) {
135 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Walk the VSI's VLAN filter table (vfta bitmap) and, for every VLAN bit
 * that is set, add (@add != 0) or remove the corresponding HW VLAN filter
 * through the AdminQ. Stops and reports on the first AQ failure.
 */
142 i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
146 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
147 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
150 for (j = 0; j < I40E_VFTA_SIZE; j++) {
154 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
155 if (!(vsi->vfta[j] & (1 << k)))
/* Reconstruct the VLAN ID from its (word, bit) position in the bitmap. */
158 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
162 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
164 ret = i40e_aq_add_vlan(hw, vsi->seid,
165 &vlan_data, 1, NULL);
167 ret = i40e_aq_remove_vlan(hw, vsi->seid,
168 &vlan_data, 1, NULL);
169 if (ret != I40E_SUCCESS) {
171 "Failed to add/rm vlan filter");
/*
 * Enable/disable VLAN anti-spoof checking for VF @vf_id on @port.
 * If VLAN filtering is not already active on the VSI, the existing VLAN
 * set is mirrored into HW filters first (i40e_add_rm_all_vlan_filter),
 * then I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK is toggled and the VSI
 * context is written back to firmware.
 */
183 struct rte_eth_dev *dev;
185 struct i40e_vsi *vsi;
187 struct i40e_vsi_context ctxt;
190 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
192 dev = &rte_eth_devices[port];
194 if (!is_i40e_supported(dev))
197 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
199 if (vf_id >= pf->vf_num || !pf->vfs) {
200 PMD_DRV_LOG(ERR, "Invalid argument.");
204 vsi = pf->vfs[vf_id].vsi;
206 PMD_DRV_LOG(ERR, "Invalid VSI.");
210 /* Check if it has been already on or off */
211 if (vsi->vlan_anti_spoof_on == on)
212 return 0; /* already on or off */
214 vsi->vlan_anti_spoof_on = on;
215 if (!vsi->vlan_filter_on) {
216 ret = i40e_add_rm_all_vlan_filter(vsi, on);
218 PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
/* Only the security section is flagged valid for the AQ update. */
223 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
225 vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
227 vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
229 memset(&ctxt, 0, sizeof(ctxt));
230 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
231 ctxt.seid = vsi->seid;
233 hw = I40E_VSI_TO_HW(vsi);
234 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
235 if (ret != I40E_SUCCESS) {
237 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Remove every MAC (and MAC+VLAN) filter currently tracked on @vsi from
 * hardware. For MACVLAN-type filters the per-MAC VLAN list is expanded
 * via i40e_find_all_vlan_for_mac() before removal; plain MAC filters use
 * a single entry (vlan_num forced to 1 — presumed, the assignment line
 * is missing from this extraction).
 */
246 struct i40e_mac_filter *f;
247 struct i40e_macvlan_filter *mv_f;
249 enum rte_mac_filter_type filter_type;
250 int ret = I40E_SUCCESS;
253 /* remove all the MACs */
254 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
255 vlan_num = vsi->vlan_num;
256 filter_type = f->mac_info.filter_type;
257 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
258 filter_type == RTE_MACVLAN_HASH_MATCH) {
260 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
261 return I40E_ERR_PARAM;
263 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
264 filter_type == RTE_MAC_HASH_MATCH)
/* One scratch entry per VLAN carried by this MAC. */
267 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
269 PMD_DRV_LOG(ERR, "failed to allocate memory");
270 return I40E_ERR_NO_MEMORY;
273 for (i = 0; i < vlan_num; i++) {
274 mv_f[i].filter_type = filter_type;
275 (void)rte_memcpy(&mv_f[i].macaddr,
276 &f->mac_info.mac_addr,
279 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
280 filter_type == RTE_MACVLAN_HASH_MATCH) {
281 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
282 &f->mac_info.mac_addr);
283 if (ret != I40E_SUCCESS) {
289 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
290 if (ret != I40E_SUCCESS) {
/*
 * Re-program into hardware every MAC (and MAC+VLAN) filter tracked on
 * @vsi — the counterpart of i40e_vsi_rm_mac_filter(), used after an
 * operation (e.g. loopback reconfiguration) that required clearing them.
 */
305 struct i40e_mac_filter *f;
306 struct i40e_macvlan_filter *mv_f;
308 int ret = I40E_SUCCESS;
311 /* restore all the MACs */
312 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
313 if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
314 (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
316 * If vlan_num is 0, that's the first time to add mac,
317 * set mask for vlan_id 0.
319 if (vsi->vlan_num == 0) {
320 i40e_set_vlan_filter(vsi, 0, 1);
323 vlan_num = vsi->vlan_num;
324 } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
325 (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
328 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
330 PMD_DRV_LOG(ERR, "failed to allocate memory");
331 return I40E_ERR_NO_MEMORY;
334 for (i = 0; i < vlan_num; i++) {
335 mv_f[i].filter_type = f->mac_info.filter_type;
336 (void)rte_memcpy(&mv_f[i].macaddr,
337 &f->mac_info.mac_addr,
/* MACVLAN filters need the concrete VLAN IDs filled in before adding. */
341 if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
342 f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
343 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
344 &f->mac_info.mac_addr);
345 if (ret != I40E_SUCCESS) {
351 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
352 if (ret != I40E_SUCCESS) {
/*
 * Enable/disable TX loopback (VEB local switching) on @vsi.
 * Requires FW >= v5.0. Because the switch-id flag change invalidates the
 * programmed filters, all MAC and VLAN filters are removed first, the
 * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB flag is toggled via AQ, and the
 * filters are restored afterwards. Returns 0 early if the VSI already
 * has the requested state.
 */
367 struct i40e_vsi_context ctxt;
374 hw = I40E_VSI_TO_HW(vsi);
376 /* Use the FW API if FW >= v5.0 */
377 if (hw->aq.fw_maj_ver < 5) {
378 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
382 /* Check if it has been already on or off */
383 if (vsi->info.valid_sections &
384 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
386 if ((vsi->info.switch_id &
387 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
388 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
389 return 0; /* already on */
391 if ((vsi->info.switch_id &
392 I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
393 return 0; /* already off */
397 /* remove all the MAC and VLAN first */
398 ret = i40e_vsi_rm_mac_filter(vsi);
400 PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
403 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
404 ret = i40e_add_rm_all_vlan_filter(vsi, 0);
406 PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
/* Flag only the switch section valid for the AQ VSI update. */
411 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
413 vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
415 vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
417 memset(&ctxt, 0, sizeof(ctxt));
418 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
419 ctxt.seid = vsi->seid;
421 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
422 if (ret != I40E_SUCCESS) {
423 PMD_DRV_LOG(ERR, "Failed to update VSI params");
427 /* add all the MAC and VLAN back */
428 ret = i40e_vsi_restore_mac_filter(vsi);
431 if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
432 ret = i40e_add_rm_all_vlan_filter(vsi, 1);
/*
 * Public API: set TX loopback on the PF's main VSI and then on every
 * VF VSI of @port, by iterating i40e_vsi_set_tx_loopback(). If no VFs
 * exist, only the PF VSI is configured.
 */
441 rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
443 struct rte_eth_dev *dev;
445 struct i40e_pf_vf *vf;
446 struct i40e_vsi *vsi;
450 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
452 dev = &rte_eth_devices[port];
454 if (!is_i40e_supported(dev))
457 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
459 /* setup PF TX loopback */
461 ret = i40e_vsi_set_tx_loopback(vsi, on);
465 /* setup TX loopback for all the VFs */
467 /* if no VF, do nothing. */
471 for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
472 vf = &pf->vfs[vf_id];
475 ret = i40e_vsi_set_tx_loopback(vsi, on);
/*
 * Enable/disable unicast promiscuous mode on VF @vf_id's VSI via the
 * AdminQ (i40e_aq_set_vsi_unicast_promiscuous). Standard port/VF/VSI
 * validation first.
 */
484 rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
486 struct rte_eth_dev *dev;
488 struct i40e_vsi *vsi;
492 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
494 dev = &rte_eth_devices[port];
496 if (!is_i40e_supported(dev))
499 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
501 if (vf_id >= pf->vf_num || !pf->vfs) {
502 PMD_DRV_LOG(ERR, "Invalid argument.");
506 vsi = pf->vfs[vf_id].vsi;
508 PMD_DRV_LOG(ERR, "Invalid VSI.");
512 hw = I40E_VSI_TO_HW(vsi);
514 ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
516 if (ret != I40E_SUCCESS) {
518 PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
/*
 * Enable/disable multicast promiscuous mode on VF @vf_id's VSI via the
 * AdminQ — the multicast twin of rte_pmd_i40e_set_vf_unicast_promisc().
 */
525 rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
527 struct rte_eth_dev *dev;
529 struct i40e_vsi *vsi;
533 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
535 dev = &rte_eth_devices[port];
537 if (!is_i40e_supported(dev))
540 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
542 if (vf_id >= pf->vf_num || !pf->vfs) {
543 PMD_DRV_LOG(ERR, "Invalid argument.");
547 vsi = pf->vfs[vf_id].vsi;
549 PMD_DRV_LOG(ERR, "Invalid VSI.");
553 hw = I40E_VSI_TO_HW(vsi);
555 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
557 if (ret != I40E_SUCCESS) {
559 PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
/*
 * Set the default (administratively assigned) MAC address of VF @vf_id.
 * Validates the MAC, stores it in the PF's per-VF record, then deletes
 * all MAC filters currently on the VF VSI so the VF re-learns filters
 * from the new address. NOTE(review): lines after the removal loop are
 * missing from this extraction — presumably the success return.
 */
566 rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
567 struct ether_addr *mac_addr)
569 struct i40e_mac_filter *f;
570 struct rte_eth_dev *dev;
571 struct i40e_pf_vf *vf;
572 struct i40e_vsi *vsi;
576 if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
579 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
581 dev = &rte_eth_devices[port];
583 if (!is_i40e_supported(dev))
586 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
588 if (vf_id >= pf->vf_num || !pf->vfs)
591 vf = &pf->vfs[vf_id];
594 PMD_DRV_LOG(ERR, "Invalid VSI.");
598 ether_addr_copy(mac_addr, &vf->mac_addr);
600 /* Remove all existing mac */
601 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
602 i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
607 /* Set vlan strip on/off for specific VF from host */
/*
 * Configure RX VLAN stripping on VF @vf_id's queues; `!!on` normalizes
 * any non-zero value to 1 before calling
 * i40e_vsi_config_vlan_stripping().
 */
609 rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
611 struct rte_eth_dev *dev;
613 struct i40e_vsi *vsi;
616 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
618 dev = &rte_eth_devices[port];
620 if (!is_i40e_supported(dev))
623 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
625 if (vf_id >= pf->vf_num || !pf->vfs) {
626 PMD_DRV_LOG(ERR, "Invalid argument.");
630 vsi = pf->vfs[vf_id].vsi;
635 ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
636 if (ret != I40E_SUCCESS) {
638 PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
/*
 * Set/clear port-VLAN (PVID) insertion for VF @vf_id. vlan_id becomes
 * the VSI's pvid; the INSERT_PVID flag is set when enabling and cleared
 * when disabling (the enable/disable condition line is missing from this
 * extraction — presumably based on vlan_id != 0). Requires SR-IOV to be
 * active on the PF.
 */
644 int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
647 struct rte_eth_dev *dev;
650 struct i40e_vsi *vsi;
651 struct i40e_vsi_context ctxt;
654 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
656 if (vlan_id > ETHER_MAX_VLAN_ID) {
657 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
661 dev = &rte_eth_devices[port];
663 if (!is_i40e_supported(dev))
666 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
667 hw = I40E_PF_TO_HW(pf);
670 * return -ENODEV if SRIOV not enabled, VF number not configured
671 * or no queue assigned.
673 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
677 if (vf_id >= pf->vf_num || !pf->vfs) {
678 PMD_DRV_LOG(ERR, "Invalid VF ID.");
682 vsi = pf->vfs[vf_id].vsi;
684 PMD_DRV_LOG(ERR, "Invalid VSI.");
/* Only the VLAN section is flagged valid for the AQ VSI update. */
688 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
689 vsi->info.pvid = vlan_id;
691 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
693 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
695 memset(&ctxt, 0, sizeof(ctxt));
696 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
697 ctxt.seid = vsi->seid;
699 hw = I40E_VSI_TO_HW(vsi);
700 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
701 if (ret != I40E_SUCCESS) {
703 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Allow/disallow broadcast traffic for VF @vf_id by adding or deleting
 * a MAC filter for ff:ff:ff:ff:ff:ff on the VF VSI. I40E_ERR_PARAM from
 * the add/delete is tolerated (filter already present/absent).
 */
709 int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
712 struct rte_eth_dev *dev;
714 struct i40e_vsi *vsi;
716 struct i40e_mac_filter_info filter;
717 struct ether_addr broadcast = {
718 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
721 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
724 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
728 dev = &rte_eth_devices[port];
730 if (!is_i40e_supported(dev))
733 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
734 hw = I40E_PF_TO_HW(pf);
736 if (vf_id >= pf->vf_num || !pf->vfs) {
737 PMD_DRV_LOG(ERR, "Invalid VF ID.");
742 * return -ENODEV if SRIOV not enabled, VF number not configured
743 * or no queue assigned.
745 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
746 pf->vf_nb_qps == 0) {
747 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
751 vsi = pf->vfs[vf_id].vsi;
753 PMD_DRV_LOG(ERR, "Invalid VSI.");
758 (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
759 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
760 ret = i40e_vsi_add_mac(vsi, &filter);
762 ret = i40e_vsi_delete_mac(vsi, &broadcast);
/* I40E_ERR_PARAM means the filter was already in the requested state. */
765 if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
767 PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
/*
 * Switch VF @vf_id's port-VLAN mode between TAGGED (on) and UNTAGGED
 * (off) — the two mode bits are kept mutually exclusive — then pushes
 * the VLAN section of the VSI context to firmware.
 */
775 int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
777 struct rte_eth_dev *dev;
780 struct i40e_vsi *vsi;
781 struct i40e_vsi_context ctxt;
784 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
787 PMD_DRV_LOG(ERR, "on should be 0 or 1.");
791 dev = &rte_eth_devices[port];
793 if (!is_i40e_supported(dev))
796 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
797 hw = I40E_PF_TO_HW(pf);
800 * return -ENODEV if SRIOV not enabled, VF number not configured
801 * or no queue assigned.
803 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
804 pf->vf_nb_qps == 0) {
805 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
809 if (vf_id >= pf->vf_num || !pf->vfs) {
810 PMD_DRV_LOG(ERR, "Invalid VF ID.");
814 vsi = pf->vfs[vf_id].vsi;
816 PMD_DRV_LOG(ERR, "Invalid VSI.");
820 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
822 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
823 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
825 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
826 vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
829 memset(&ctxt, 0, sizeof(ctxt));
830 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
831 ctxt.seid = vsi->seid;
833 hw = I40E_VSI_TO_HW(vsi);
834 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
835 if (ret != I40E_SUCCESS) {
837 PMD_DRV_LOG(ERR, "Failed to update VSI params");
/*
 * Count the VLAN IDs set in the VSI's vfta bitmap. NOTE(review): the
 * accumulator declaration and increment lines are missing from this
 * extraction; only the bitmap-walk skeleton is visible.
 */
844 i40e_vlan_filter_count(struct i40e_vsi *vsi)
850 for (j = 0; j < I40E_VFTA_SIZE; j++) {
854 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
855 if (!(vsi->vfta[j] & (1 << k)))
858 vlan_id = j * I40E_UINT32_BIT_SIZE + k;
/*
 * Add/remove VLAN @vlan_id on every VF selected by the @vf_mask bitmap.
 * When the first filter is added to a VSI, vlan_filter_on is latched,
 * VLAN promiscuous is configured via AQ and (if anti-spoof is off) the
 * existing VLAN set is mirrored into HW. When the last filter is
 * removed, the state is undone symmetrically. vlan_id 0 is rejected.
 */
869 int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
870 uint64_t vf_mask, uint8_t on)
872 struct rte_eth_dev *dev;
875 struct i40e_vsi *vsi;
877 int ret = I40E_SUCCESS;
879 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
881 dev = &rte_eth_devices[port];
883 if (!is_i40e_supported(dev))
886 if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
887 PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
892 PMD_DRV_LOG(ERR, "No VF.");
897 PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
901 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
902 hw = I40E_PF_TO_HW(pf);
905 * return -ENODEV if SRIOV not enabled, VF number not configured
906 * or no queue assigned.
908 if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
909 pf->vf_nb_qps == 0) {
910 PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
/* Loop bails out as soon as any VF's update fails (ret != SUCCESS). */
914 for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
915 if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
916 vsi = pf->vfs[vf_idx].vsi;
918 if (!vsi->vlan_filter_on) {
919 vsi->vlan_filter_on = true;
920 i40e_aq_set_vsi_vlan_promisc(hw,
924 if (!vsi->vlan_anti_spoof_on)
925 i40e_add_rm_all_vlan_filter(
928 ret = i40e_vsi_add_vlan(vsi, vlan_id);
930 ret = i40e_vsi_delete_vlan(vsi, vlan_id);
932 if (!i40e_vlan_filter_count(vsi)) {
933 vsi->vlan_filter_on = false;
934 i40e_aq_set_vsi_vlan_promisc(hw,
943 if (ret != I40E_SUCCESS) {
945 PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
/*
 * Fill @stats with VF @vf_id's VSI statistics. Refreshes the VSI
 * counters (i40e_update_vsi_stats) and maps the i40e eth_stats fields
 * onto the generic rte_eth_stats layout.
 */
952 rte_pmd_i40e_get_vf_stats(uint8_t port,
954 struct rte_eth_stats *stats)
956 struct rte_eth_dev *dev;
958 struct i40e_vsi *vsi;
960 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
962 dev = &rte_eth_devices[port];
964 if (!is_i40e_supported(dev))
967 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
969 if (vf_id >= pf->vf_num || !pf->vfs) {
970 PMD_DRV_LOG(ERR, "Invalid VF ID.");
974 vsi = pf->vfs[vf_id].vsi;
976 PMD_DRV_LOG(ERR, "Invalid VSI.");
980 i40e_update_vsi_stats(vsi);
/* ipackets/opackets aggregate unicast + multicast + broadcast counters. */
982 stats->ipackets = vsi->eth_stats.rx_unicast +
983 vsi->eth_stats.rx_multicast +
984 vsi->eth_stats.rx_broadcast;
985 stats->opackets = vsi->eth_stats.tx_unicast +
986 vsi->eth_stats.tx_multicast +
987 vsi->eth_stats.tx_broadcast;
988 stats->ibytes = vsi->eth_stats.rx_bytes;
989 stats->obytes = vsi->eth_stats.tx_bytes;
990 stats->ierrors = vsi->eth_stats.rx_discards;
991 stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
/*
 * Reset VF @vf_id's statistics baseline: clearing offset_loaded makes
 * the next i40e_update_vsi_stats() call re-snapshot the HW counters as
 * the new zero point.
 */
997 rte_pmd_i40e_reset_vf_stats(uint8_t port,
1000 struct rte_eth_dev *dev;
1002 struct i40e_vsi *vsi;
1004 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1006 dev = &rte_eth_devices[port];
1008 if (!is_i40e_supported(dev))
1011 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1013 if (vf_id >= pf->vf_num || !pf->vfs) {
1014 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1018 vsi = pf->vfs[vf_id].vsi;
1020 PMD_DRV_LOG(ERR, "Invalid VSI.");
1024 vsi->offset_loaded = false;
1025 i40e_update_vsi_stats(vsi);
/*
 * Set VF @vf_id's aggregate TX bandwidth limit to @bw Mbps (0 disables
 * the limit). bw must be <= I40E_QOS_BW_MAX and a multiple of
 * I40E_QOS_BW_GRANULARITY; it is converted to 50 Mbps credit units
 * before the AQ call. Rejected if any per-TC limit is active, since VF
 * and TC limits are mutually exclusive on this HW.
 */
1031 rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
1033 struct rte_eth_dev *dev;
1035 struct i40e_vsi *vsi;
1040 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1042 dev = &rte_eth_devices[port];
1044 if (!is_i40e_supported(dev))
1047 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1049 if (vf_id >= pf->vf_num || !pf->vfs) {
1050 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1054 vsi = pf->vfs[vf_id].vsi;
1056 PMD_DRV_LOG(ERR, "Invalid VSI.");
1060 if (bw > I40E_QOS_BW_MAX) {
1061 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1066 if (bw % I40E_QOS_BW_GRANULARITY) {
1067 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1068 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps to HW credit units (one credit per 50 Mbps). */
1072 bw /= I40E_QOS_BW_GRANULARITY;
1074 hw = I40E_VSI_TO_HW(vsi);
1077 if (bw == vsi->bw_info.bw_limit) {
1079 "No change for VF max bandwidth. Nothing to do.");
1084 * VF bandwidth limitation and TC bandwidth limitation cannot be
1085 * enabled in parallel, quit if TC bandwidth limitation is enabled.
1087 * If bw is 0, means disable bandwidth limitation. Then no need to
1088 * check TC bandwidth limitation.
1091 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1092 if ((vsi->enabled_tc & BIT_ULL(i)) &&
1093 vsi->bw_info.bw_ets_credits[i])
1096 if (i != I40E_MAX_TRAFFIC_CLASS) {
1098 "TC max bandwidth has been set on this VF,"
1099 " please disable it first.");
1104 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
1107 "Failed to set VF %d bandwidth, err(%d).",
1112 /* Store the configuration. */
1113 vsi->bw_info.bw_limit = (uint16_t)bw;
1114 vsi->bw_info.bw_max = 0;
/*
 * Set per-TC relative bandwidth weights for VF @vf_id. @bw_weight holds
 * one weight per enabled TC (tc_num entries); each weight must be >= 1
 * and the weights must sum to 100. Builds the full TC credit table
 * (incremental changes are not supported by the AQ command), skips the
 * AQ call when nothing changed (b_change), and caches the result in
 * vsi->bw_info on success.
 */
1120 rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
1121 uint8_t tc_num, uint8_t *bw_weight)
1123 struct rte_eth_dev *dev;
1125 struct i40e_vsi *vsi;
1127 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
1131 bool b_change = false;
1133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1135 dev = &rte_eth_devices[port];
1137 if (!is_i40e_supported(dev))
1140 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1142 if (vf_id >= pf->vf_num || !pf->vfs) {
1143 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1147 vsi = pf->vfs[vf_id].vsi;
1149 PMD_DRV_LOG(ERR, "Invalid VSI.");
1153 if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
1154 PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
1155 I40E_MAX_TRAFFIC_CLASS);
/* Caller must supply exactly one weight for every enabled TC. */
1160 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1161 if (vsi->enabled_tc & BIT_ULL(i))
1164 if (sum != tc_num) {
1166 "Weight should be set for all %d enabled TCs.",
1172 for (i = 0; i < tc_num; i++) {
1173 if (!bw_weight[i]) {
1175 "The weight should be 1 at least.");
1178 sum += bw_weight[i];
1182 "The summary of the TC weight should be 100.");
1187 * Create the configuration for all the TCs.
1189 memset(&tc_bw, 0, sizeof(tc_bw));
1190 tc_bw.tc_valid_bits = vsi->enabled_tc;
1192 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1193 if (vsi->enabled_tc & BIT_ULL(i)) {
1195 vsi->bw_info.bw_ets_share_credits[i])
1198 tc_bw.tc_bw_credits[i] = bw_weight[j];
1206 "No change for TC allocated bandwidth."
1211 hw = I40E_VSI_TO_HW(vsi);
1213 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
1216 "Failed to set VF %d TC bandwidth weight, err(%d).",
1221 /* Store the configuration. */
1223 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1224 if (vsi->enabled_tc & BIT_ULL(i)) {
1225 vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
/*
 * Set a max-bandwidth (ETS SLA) limit of @bw Mbps on traffic class
 * @tc_no of VF @vf_id. Same range/granularity rules as
 * rte_pmd_i40e_set_vf_max_bw; the TC must be enabled on the VSI.
 * An active whole-VF limit is first disabled (the two are mutually
 * exclusive), then the complete per-TC credit table is rebuilt and
 * written, since the AQ command does not accept incremental updates.
 */
1234 rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
1235 uint8_t tc_no, uint32_t bw)
1237 struct rte_eth_dev *dev;
1239 struct i40e_vsi *vsi;
1241 struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
1245 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1247 dev = &rte_eth_devices[port];
1249 if (!is_i40e_supported(dev))
1252 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1254 if (vf_id >= pf->vf_num || !pf->vfs) {
1255 PMD_DRV_LOG(ERR, "Invalid VF ID.");
1259 vsi = pf->vfs[vf_id].vsi;
1261 PMD_DRV_LOG(ERR, "Invalid VSI.");
1265 if (bw > I40E_QOS_BW_MAX) {
1266 PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
1271 if (bw % I40E_QOS_BW_GRANULARITY) {
1272 PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
1273 I40E_QOS_BW_GRANULARITY);
/* Convert Mbps to HW credit units (one credit per 50 Mbps). */
1277 bw /= I40E_QOS_BW_GRANULARITY;
1279 if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
1280 PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
1281 I40E_MAX_TRAFFIC_CLASS);
1285 hw = I40E_VSI_TO_HW(vsi);
1287 if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
1288 PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
1294 if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
1296 "No change for TC max bandwidth. Nothing to do.");
1301 * VF bandwidth limitation and TC bandwidth limitation cannot be
1302 * enabled in parallel, disable VF bandwidth limitation if it's
1304 * If bw is 0, means disable bandwidth limitation. Then no need to
1305 * care about VF bandwidth limitation configuration.
1307 if (bw && vsi->bw_info.bw_limit) {
1308 ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
1311 "Failed to disable VF(%d)"
1312 " bandwidth limitation, err(%d).",
1318 "VF max bandwidth is disabled according"
1319 " to TC max bandwidth setting.");
1323 * Get all the TCs' info to create a whole picture.
1324 * Because the incremental change isn't permitted.
1326 memset(&tc_bw, 0, sizeof(tc_bw));
1327 tc_bw.tc_valid_bits = vsi->enabled_tc;
1328 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1329 if (vsi->enabled_tc & BIT_ULL(i)) {
1330 tc_bw.tc_bw_credits[i] =
1332 vsi->bw_info.bw_ets_credits[i]);
1335 tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
1337 ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
1340 "Failed to set VF %d TC %d max bandwidth, err(%d).",
1345 /* Store the configuration. */
1346 vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
/*
 * Mark the TCs in @tc_map as strict-priority on the port's VEB.
 * tc_map must be a subset of the VEB's enabled TCs. Entering strict
 * mode for the first time stops LLDP/DCBx (they would fight over the
 * ETS config); leaving it entirely (tc_map presumably 0 — the condition
 * line is missing here) restarts LLDP. The ETS data sent to FW carries
 * each TC's current share credits, substituting the minimum weight of 1
 * for any TC whose stored credits are 0.
 */
1352 rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
1354 struct rte_eth_dev *dev;
1356 struct i40e_vsi *vsi;
1357 struct i40e_veb *veb;
1359 struct i40e_aqc_configure_switching_comp_ets_data ets_data;
1363 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1365 dev = &rte_eth_devices[port];
1367 if (!is_i40e_supported(dev))
1370 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1374 PMD_DRV_LOG(ERR, "Invalid VSI.");
1380 PMD_DRV_LOG(ERR, "Invalid VEB.");
1384 if ((tc_map & veb->enabled_tc) != tc_map) {
1386 "TC bitmap isn't the subset of enabled TCs 0x%x.",
1391 if (tc_map == veb->strict_prio_tc) {
1392 PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
1396 hw = I40E_VSI_TO_HW(vsi);
1398 /* Disable DCBx if it's the first time to set strict priority. */
1399 if (!veb->strict_prio_tc) {
1400 ret = i40e_aq_stop_lldp(hw, true, NULL);
1403 "Failed to disable DCBx as it's already"
1407 "DCBx is disabled according to strict"
1408 " priority setting.");
1411 memset(&ets_data, 0, sizeof(ets_data));
1412 ets_data.tc_valid_bits = veb->enabled_tc;
1413 ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
1414 ets_data.tc_strict_priority_flags = tc_map;
1415 /* Get all TCs' bandwidth. */
1416 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1417 if (veb->enabled_tc & BIT_ULL(i)) {
1418 /* For rubust, if bandwidth is 0, use 1 instead. */
1419 if (veb->bw_info.bw_ets_share_credits[i])
1420 ets_data.tc_bw_share_credits[i] =
1421 veb->bw_info.bw_ets_share_credits[i];
1423 ets_data.tc_bw_share_credits[i] =
1424 I40E_QOS_BW_WEIGHT_MIN;
/* Opcode depends on the transition: enable (first use), modify, or disable. */
1428 if (!veb->strict_prio_tc)
1429 ret = i40e_aq_config_switch_comp_ets(
1430 hw, veb->uplink_seid,
1431 &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
1434 ret = i40e_aq_config_switch_comp_ets(
1435 hw, veb->uplink_seid,
1436 &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
1439 ret = i40e_aq_config_switch_comp_ets(
1440 hw, veb->uplink_seid,
1441 &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
1446 "Failed to set TCs' strict priority mode."
1451 veb->strict_prio_tc = tc_map;
1453 /* Enable DCBx again, if all the TCs' strict priority disabled. */
1455 ret = i40e_aq_start_lldp(hw, NULL);
1458 "Failed to enable DCBx, err(%d).", ret);
1463 "DCBx is enabled again according to strict"
1464 " priority setting.");
/* Sizing of the DDP profile-info list read back from firmware. */
1470 #define I40E_PROFILE_INFO_SIZE 48
1471 #define I40E_MAX_PROFILE_NUM 16
/*
 * Build a DDP profile-info section in @profile_info_sec: a section
 * header (type INFO) followed by an i40e_profile_info record carrying
 * @track_id, @name, @version and the add/remove op (last parameter —
 * its declaration line is missing from this extraction).
 */
1474 i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
1475 uint32_t track_id, uint8_t *profile_info_sec,
1478 struct i40e_profile_section_header *sec = NULL;
1479 struct i40e_profile_info *pinfo;
1481 sec = (struct i40e_profile_section_header *)profile_info_sec;
1483 sec->data_end = sizeof(struct i40e_profile_section_header) +
1484 sizeof(struct i40e_profile_info);
1485 sec->section.type = SECTION_TYPE_INFO;
1486 sec->section.offset = sizeof(struct i40e_profile_section_header);
1487 sec->section.size = sizeof(struct i40e_profile_info);
/* The info record lives immediately after the header, at section.offset. */
1488 pinfo = (struct i40e_profile_info *)(profile_info_sec +
1489 sec->section.offset);
1490 pinfo->track_id = track_id;
1491 memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
1492 memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
1494 pinfo->op = I40E_DDP_ADD_TRACKID;
1496 pinfo->op = I40E_DDP_REMOVE_TRACKID;
/*
 * Write the profile-info section built by i40e_generate_profile_info_sec()
 * to the device via i40e_aq_write_ddp(), registering (or removing) the
 * profile's track_id in firmware's profile list.
 */
1499 static enum i40e_status_code
1500 i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
1502 enum i40e_status_code status = I40E_SUCCESS;
1503 struct i40e_profile_section_header *sec;
1505 uint32_t offset = 0;
1508 sec = (struct i40e_profile_section_header *)profile_info_sec;
1509 track_id = ((struct i40e_profile_info *)(profile_info_sec +
1510 sec->section.offset))->track_id;
1512 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
1513 track_id, &offset, &info, NULL);
1515 PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
1516 "offset %d, info %d",
/*
 * NOTE(review): these two macros duplicate the identical definitions a
 * few lines above (original lines 1470-1471). Identical redefinition is
 * legal C but redundant — one copy should be removed upstream.
 */
1522 #define I40E_PROFILE_INFO_SIZE 48
1523 #define I40E_MAX_PROFILE_NUM 16
1525 /* Check if the profile info exists */
/*
 * Query firmware's DDP profile list and return whether the profile
 * described in @profile_info_sec is already loaded (matched by
 * track_id + version + name). Return-value encoding is presumably
 * 1 = exists, 0 = absent, <0 = error — the return lines are missing
 * from this extraction; verify against the full source.
 */
1527 i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
1529 struct rte_eth_dev *dev = &rte_eth_devices[port];
1530 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1532 struct rte_pmd_i40e_profile_list *p_list;
1533 struct rte_pmd_i40e_profile_info *pinfo, *p;
1537 buff = rte_zmalloc("pinfo_list",
1538 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1541 PMD_DRV_LOG(ERR, "failed to allocate memory");
1545 ret = i40e_aq_get_ddp_list(
1547 (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
1550 PMD_DRV_LOG(ERR, "Failed to get profile info list.");
1554 p_list = (struct rte_pmd_i40e_profile_list *)buff;
1555 pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
1556 sizeof(struct i40e_profile_section_header));
1557 for (i = 0; i < p_list->p_count; i++) {
1558 p = &p_list->p_info[i];
1559 if ((pinfo->track_id == p->track_id) &&
1560 !memcmp(&pinfo->version, &p->version,
1561 sizeof(struct i40e_ddp_version)) &&
1562 !memcmp(&pinfo->name, &p->name,
1563 I40E_DDP_NAME_SIZE)) {
1564 PMD_DRV_LOG(INFO, "Profile exists.");
/*
 * Load a DDP (Dynamic Device Personalization) package onto @port.
 * Validates the buffer size and segment count, locates the metadata
 * segment (for track_id) and the i40e profile segment, then for the
 * WR_ADD op: checks the profile is not already present, writes the
 * profile to HW (i40e_write_profile) and registers it in the profile
 * list (i40e_add_rm_profile_info). profile_info_sec is heap-allocated
 * and freed on every visible exit path.
 */
1575 rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
1577 enum rte_pmd_i40e_package_op op)
1579 struct rte_eth_dev *dev;
1581 struct i40e_package_header *pkg_hdr;
1582 struct i40e_generic_seg_header *profile_seg_hdr;
1583 struct i40e_generic_seg_header *metadata_seg_hdr;
1585 uint8_t *profile_info_sec;
1587 enum i40e_status_code status = I40E_SUCCESS;
1589 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1591 dev = &rte_eth_devices[port];
1593 if (!is_i40e_supported(dev))
1596 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Minimum size: package header + metadata segment + two trailing words. */
1598 if (size < (sizeof(struct i40e_package_header) +
1599 sizeof(struct i40e_metadata_segment) +
1600 sizeof(uint32_t) * 2)) {
1601 PMD_DRV_LOG(ERR, "Buff is invalid.");
1605 pkg_hdr = (struct i40e_package_header *)buff;
1608 PMD_DRV_LOG(ERR, "Failed to fill the package structure");
1612 if (pkg_hdr->segment_count < 2) {
1613 PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
1617 /* Find metadata segment */
1618 metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
1620 if (!metadata_seg_hdr) {
1621 PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
1624 track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
1626 /* Find profile segment */
1627 profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
1629 if (!profile_seg_hdr) {
1630 PMD_DRV_LOG(ERR, "Failed to find profile segment header");
1634 profile_info_sec = rte_zmalloc(
1635 "i40e_profile_info",
1636 sizeof(struct i40e_profile_section_header) +
1637 sizeof(struct i40e_profile_info),
1639 if (!profile_info_sec) {
1640 PMD_DRV_LOG(ERR, "Failed to allocate memory");
1644 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
1645 /* Check if the profile exists */
1646 i40e_generate_profile_info_sec(
1647 ((struct i40e_profile_segment *)profile_seg_hdr)->name,
1648 &((struct i40e_profile_segment *)profile_seg_hdr)->version,
1649 track_id, profile_info_sec, 1);
1650 is_exist = i40e_check_profile_info(port, profile_info_sec);
1652 PMD_DRV_LOG(ERR, "Profile already exists.");
1653 rte_free(profile_info_sec);
1655 } else if (is_exist < 0) {
1656 PMD_DRV_LOG(ERR, "Failed to check profile.");
1657 rte_free(profile_info_sec);
1661 /* Write profile to HW */
1662 status = i40e_write_profile(
1664 (struct i40e_profile_segment *)profile_seg_hdr,
1667 PMD_DRV_LOG(ERR, "Failed to write profile.");
1668 rte_free(profile_info_sec);
1672 /* Add profile info to info list */
1673 status = i40e_add_rm_profile_info(hw, profile_info_sec);
1675 PMD_DRV_LOG(ERR, "Failed to add profile info.");
1677 PMD_DRV_LOG(ERR, "Operation not supported.");
1680 rte_free(profile_info_sec);
1685 rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
1687 struct rte_eth_dev *dev;
1689 enum i40e_status_code status = I40E_SUCCESS;
1691 RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1693 dev = &rte_eth_devices[port];
1695 if (!is_i40e_supported(dev))
1698 if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
1701 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1703 status = i40e_aq_get_ddp_list(hw, (void *)buff,