X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Frte_pmd_i40e.c;h=0fbf79ca35e0f1501745d91a1810a5eb1b819a15;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=0cd2d7abe84475260fa1eacae04678682a356b9f;hpb=e163c18a15b04be05e040a9d0d53009f3d0ead71;p=dpdk.git

diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 0cd2d7abe8..0fbf79ca35 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -1,47 +1,20 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ +#include #include #include #include "base/i40e_prototype.h" +#include "base/i40e_dcb.h" #include "i40e_ethdev.h" #include "i40e_pf.h" #include "i40e_rxtx.h" #include "rte_pmd_i40e.h" int -rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) +rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -66,7 +39,7 @@ rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) } int -rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -170,7 +143,7 @@ i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add) } int -rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -366,7 +339,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) hw = I40E_VSI_TO_HW(vsi); /* Use the FW API if FW >= v5.0 */ - if (hw->aq.fw_maj_ver < 5) { + if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) { PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); return -ENOTSUP; } @@ -430,7 +403,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) } int -rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) +rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -473,7 +446,7 @@ rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) } int -rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -514,7 +487,7 @@ rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) } int -rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -555,8 +528,8 @@ rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) } int -rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, - struct ether_addr *mac_addr) +rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) { struct i40e_mac_filter *f; struct rte_eth_dev *dev; @@ -587,7 +560,7 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, return -EINVAL; } - ether_addr_copy(mac_addr, &vf->mac_addr); + rte_ether_addr_copy(mac_addr, &vf->mac_addr); /* Remove all existing mac */ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) @@ -598,9 +571,52 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, return 0; } +static const struct rte_ether_addr null_mac_addr; + +int +rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + struct i40e_pf_vf *vf; + struct i40e_vsi *vsi; + struct i40e_pf *pf; + + if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) + return -EINVAL; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) + return -EINVAL; + + vf = &pf->vfs[vf_id]; + vsi = vf->vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + 
return -EINVAL; + } + + if (rte_is_same_ether_addr(mac_addr, &vf->mac_addr)) + /* Reset the mac with NULL address */ + rte_ether_addr_copy(&null_mac_addr, &vf->mac_addr); + + /* Remove the mac */ + i40e_vsi_delete_mac(vsi, mac_addr); + + return 0; +} + /* Set vlan strip on/off for specific VF from host */ int -rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on) +rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -635,7 +651,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on) return ret; } -int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id, uint16_t vlan_id) { struct rte_eth_dev *dev; @@ -647,7 +663,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (vlan_id > ETHER_MAX_VLAN_ID) { + if (vlan_id > RTE_ETHER_MAX_VLAN_ID) { PMD_DRV_LOG(ERR, "Invalid VLAN ID."); return -EINVAL; } @@ -700,7 +716,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, return ret; } -int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, +int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; @@ -708,7 +724,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, struct i40e_vsi *vsi; struct i40e_hw *hw; struct i40e_mac_filter_info filter; - struct ether_addr broadcast = { + struct rte_ether_addr broadcast = { .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; int ret; @@ -749,7 +765,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, } if (on) { - rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + rte_memcpy(&filter.mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; ret = i40e_vsi_add_mac(vsi, &filter); } else { @@ -766,7 +782,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, return ret; } -int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on) +int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -860,7 +876,7 @@ i40e_vlan_filter_count(struct i40e_vsi *vsi) return count; } -int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, +int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id, uint64_t vf_mask, uint8_t on) { struct rte_eth_dev *dev; @@ -877,7 +893,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, if (!is_i40e_supported(dev)) return -ENOTSUP; - if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) { + if (vlan_id > RTE_ETHER_MAX_VLAN_ID || !vlan_id) { PMD_DRV_LOG(ERR, "Invalid VLAN ID."); return -EINVAL; } @@ -943,7 +959,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, } int -rte_pmd_i40e_get_vf_stats(uint8_t port, +rte_pmd_i40e_get_vf_stats(uint16_t port, uint16_t vf_id, struct rte_eth_stats *stats) { @@ -988,7 +1004,7 @@ rte_pmd_i40e_get_vf_stats(uint8_t port, } int -rte_pmd_i40e_reset_vf_stats(uint8_t port, +rte_pmd_i40e_reset_vf_stats(uint16_t port, uint16_t vf_id) { struct rte_eth_dev *dev; @@ -1022,7 +1038,7 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port, } int -rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw) +rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -1111,7 +1127,7 @@ rte_pmd_i40e_set_vf_max_bw(uint8_t 
port, uint16_t vf_id, uint32_t bw) } int -rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id, +rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id, uint8_t tc_num, uint8_t *bw_weight) { struct rte_eth_dev *dev; @@ -1225,7 +1241,7 @@ rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id, } int -rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id, +rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id, uint8_t tc_no, uint32_t bw) { struct rte_eth_dev *dev; @@ -1343,7 +1359,7 @@ rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id, } int -rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map) +rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map) { struct rte_eth_dev *dev; struct i40e_pf *pf; @@ -1515,7 +1531,7 @@ i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec) /* Check if the profile info exists */ static int -i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) +i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) { struct rte_eth_dev *dev = &rte_eth_devices[port]; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1524,7 +1540,14 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) struct rte_pmd_i40e_profile_info *pinfo, *p; uint32_t i; int ret; + static const uint32_t group_mask = 0x00ff0000; + pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec + + sizeof(struct i40e_profile_section_header)); + if (pinfo->track_id == 0) { + PMD_DRV_LOG(INFO, "Read-only profile."); + return 0; + } buff = rte_zmalloc("pinfo_list", (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4), 0); @@ -1543,8 +1566,6 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) return -1; } p_list = (struct rte_pmd_i40e_profile_list *)buff; - pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec + - sizeof(struct i40e_profile_section_header)); for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; if (pinfo->track_id == p->track_id) { @@ -1553,13 +1574,37 @@ i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec) return 1; } } + /* profile with group id 0xff is compatible with any other profile */ + if ((pinfo->track_id & group_mask) == group_mask) { + rte_free(buff); + return 0; + } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == 0) { + PMD_DRV_LOG(INFO, "Profile of the group 0 exists."); + rte_free(buff); + return 2; + } + } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == group_mask) + continue; + if ((pinfo->track_id & group_mask) != + (p->track_id & group_mask)) { + PMD_DRV_LOG(INFO, "Profile of different group exists."); + rte_free(buff); + return 3; + } + } rte_free(buff); return 0; } int -rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, +rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, uint32_t size, enum rte_pmd_i40e_package_op op) { @@ -1572,6 +1617,7 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, uint8_t *profile_info_sec; int is_exist; enum i40e_status_code status = I40E_SUCCESS; + static const uint32_t type_mask = 0xff000000; if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && op != RTE_PMD_I40E_PKG_OP_WR_ONLY && @@ -1608,8 +1654,6 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, return -EINVAL; } - i40e_update_customized_info(dev, buff, size); - /* Find metadata segment */ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr); @@ 
-1623,6 +1667,10 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, return -EINVAL; } + /* force read-only track_id for type 0 */ + if ((track_id & type_mask) == 0) + track_id = 0; + /* Find profile segment */ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr); @@ -1656,12 +1704,18 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { if (is_exist) { - PMD_DRV_LOG(ERR, "Profile already exists."); + if (is_exist == 1) + PMD_DRV_LOG(ERR, "Profile already exists."); + else if (is_exist == 2) + PMD_DRV_LOG(ERR, "Profile of group 0 already exists."); + else if (is_exist == 3) + PMD_DRV_LOG(ERR, "Profile of different group already exists"); + i40e_update_customized_info(dev, buff, size, op); rte_free(profile_info_sec); return -EEXIST; } } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { - if (!is_exist) { + if (is_exist != 1) { PMD_DRV_LOG(ERR, "Profile does not exist."); rte_free(profile_info_sec); return -EACCES; @@ -1704,6 +1758,10 @@ rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff, } } + if (op == RTE_PMD_I40E_PKG_OP_WR_ADD || + op == RTE_PMD_I40E_PKG_OP_WR_DEL) + i40e_update_customized_info(dev, buff, size, op); + rte_free(profile_info_sec); return status; } @@ -1926,7 +1984,7 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size, tlv = (struct i40e_profile_tlv_section_record *)&proto[1]; for (i = j = 0; i < nb_rec; j++) { pinfo[j].proto_id = tlv->data[0]; - strncpy(pinfo[j].name, (const char *)&tlv->data[1], + strlcpy(pinfo[j].name, (const char *)&tlv->data[1], I40E_DDP_NAME_SIZE); i += tlv->len; tlv = &tlv[tlv->len]; @@ -2038,7 +2096,7 @@ int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size, } int -rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size) +rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size) { struct rte_eth_dev *dev; struct i40e_hw *hw; @@ -2081,7 +2139,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type) l2 != RTE_PTYPE_L2_ETHER_LLDP && l2 != RTE_PTYPE_L2_ETHER_NSH && l2 != RTE_PTYPE_L2_ETHER_VLAN && - l2 != RTE_PTYPE_L2_ETHER_QINQ) + l2 != RTE_PTYPE_L2_ETHER_QINQ && + l2 != RTE_PTYPE_L2_ETHER_PPPOE) return -1; if (l3 && @@ -2110,7 +2169,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type) tnl != RTE_PTYPE_TUNNEL_GENEVE && tnl != RTE_PTYPE_TUNNEL_GRENAT && tnl != RTE_PTYPE_TUNNEL_GTPC && - tnl != RTE_PTYPE_TUNNEL_GTPU) + tnl != RTE_PTYPE_TUNNEL_GTPU && + tnl != RTE_PTYPE_TUNNEL_L2TP) return -1; if (il2 && @@ -2168,7 +2228,7 @@ static int check_invalid_ptype_mapping( int rte_pmd_i40e_ptype_mapping_update( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t count, uint8_t exclusive) @@ -2204,7 +2264,7 @@ rte_pmd_i40e_ptype_mapping_update( return 0; } -int rte_pmd_i40e_ptype_mapping_reset(uint8_t port) +int rte_pmd_i40e_ptype_mapping_reset(uint16_t port) { struct rte_eth_dev *dev; @@ -2221,7 +2281,7 @@ int rte_pmd_i40e_ptype_mapping_reset(uint8_t port) } int rte_pmd_i40e_ptype_mapping_get( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_ptype_mapping *mapping_items, uint16_t size, uint16_t *count, @@ -2255,7 +2315,7 @@ int rte_pmd_i40e_ptype_mapping_get( return 0; } -int rte_pmd_i40e_ptype_mapping_replace(uint8_t port, +int rte_pmd_i40e_ptype_mapping_replace(uint16_t port, uint32_t target, uint8_t mask, uint32_t pkt_type) @@ -2294,8 +2354,8 @@ int rte_pmd_i40e_ptype_mapping_replace(uint8_t port, } int -rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id, - 
struct ether_addr *mac_addr) +rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id, + struct rte_ether_addr *mac_addr) { struct rte_eth_dev *dev; struct i40e_pf_vf *vf; @@ -2327,7 +2387,7 @@ rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id, } mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; - ether_addr_copy(mac_addr, &mac_filter.mac_addr); + rte_ether_addr_copy(mac_addr, &mac_filter.mac_addr); ret = i40e_vsi_add_mac(vsi, &mac_filter); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to add MAC filter."); @@ -2337,7 +2397,7 @@ rte_pmd_i40e_add_vf_mac_addr(uint8_t port, uint16_t vf_id, return 0; } -int rte_pmd_i40e_flow_type_mapping_reset(uint8_t port) +int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port) { struct rte_eth_dev *dev; @@ -2354,7 +2414,7 @@ int rte_pmd_i40e_flow_type_mapping_reset(uint8_t port) } int rte_pmd_i40e_flow_type_mapping_get( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_flow_type_mapping *mapping_items) { struct rte_eth_dev *dev; @@ -2380,7 +2440,7 @@ int rte_pmd_i40e_flow_type_mapping_get( int rte_pmd_i40e_flow_type_mapping_update( - uint8_t port, + uint16_t port, struct rte_pmd_i40e_flow_type_mapping *mapping_items, uint16_t count, uint8_t exclusive) @@ -2430,3 +2490,715 @@ rte_pmd_i40e_flow_type_mapping_update( return 0; } + +int +rte_pmd_i40e_query_vfid_by_mac(uint16_t port, + const struct rte_ether_addr *vf_mac) +{ + struct rte_eth_dev *dev; + struct rte_ether_addr *mac; + struct i40e_pf *pf; + int vf_id; + struct i40e_pf_vf *vf; + uint16_t vf_num; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + vf_num = pf->vf_num; + + for (vf_id = 0; vf_id < vf_num; vf_id++) { + vf = &pf->vfs[vf_id]; + mac = &vf->mac_addr; + + if (rte_is_same_ether_addr(mac, vf_mac)) + return vf_id; + } + + return -EINVAL; +} + +static int +i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + uint16_t i; + struct i40e_vsi *vsi = pf->main_vsi; + uint16_t queue_offset, bsf, tc_index; + struct i40e_vsi_context ctxt; + struct i40e_aqc_vsi_properties_data *vsi_info; + struct i40e_queue_regions *region_info = + &pf->queue_region; + int32_t ret = -EINVAL; + + if (!region_info->queue_region_number) { + PMD_INIT_LOG(ERR, "there is no that region id been set before"); + return ret; + } + + memset(&ctxt, 0, sizeof(struct i40e_vsi_context)); + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + vsi_info = &ctxt.info; + + memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8); + memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16); + + /* Configure queue region and queue mapping parameters, + * for enabled queue region, allocate queues to this region. 
+ */ + + for (i = 0; i < region_info->queue_region_number; i++) { + tc_index = region_info->region[i].region_id; + bsf = rte_bsf32(region_info->region[i].queue_num); + queue_offset = region_info->region[i].queue_start_index; + vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16( + (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + } + + /* Associate queue number with VSI, Keep vsi->nb_qps unchanged */ + vsi_info->mapping_flags |= + rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG); + vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue); + vsi_info->valid_sections |= + rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ", + hw->aq.asq_last_status); + return ret; + } + /* update the local VSI info with updated queue map */ + rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping, + sizeof(vsi->info.tc_mapping)); + rte_memcpy(&vsi->info.queue_mapping, + &ctxt.info.queue_mapping, + sizeof(vsi->info.queue_mapping)); + vsi->info.mapping_flags = ctxt.info.mapping_flags; + vsi->info.valid_sections = 0; + + return 0; +} + + +static int +i40e_queue_region_set_region(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *conf_ptr) +{ + uint16_t i; + struct i40e_vsi *main_vsi = pf->main_vsi; + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + + if (!((rte_is_power_of_2(conf_ptr->queue_num)) && + conf_ptr->queue_num <= 64)) { + PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return ret; + } + + if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if ((conf_ptr->queue_start_index + conf_ptr->queue_num) + > main_vsi->nb_used_qps) { + PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (conf_ptr->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number && + i <= I40E_REGION_MAX_INDEX) { + info->region[i].region_id = conf_ptr->region_id; + info->region[i].queue_num = conf_ptr->queue_num; + info->region[i].queue_start_index = + conf_ptr->queue_start_index; + info->queue_region_number++; + } else { + PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before"); + return ret; + } + + return 0; +} + +static int +i40e_queue_region_set_flowtype(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + int32_t ret = -EINVAL; + struct i40e_queue_regions *info = &pf->queue_region; + uint16_t i, j; + uint16_t region_index, flowtype_index; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. 
+ */ + + if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) { + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); + return ret; + } + + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + if (rss_region_conf->hw_flowtype == + info->region[i].hw_flowtype[j]) { + PMD_DRV_LOG(ERR, "that hw_flowtype has been set before"); + return 0; + } + } + } + + flowtype_index = info->region[region_index].flowtype_num; + info->region[region_index].hw_flowtype[flowtype_index] = + rss_region_conf->hw_flowtype; + info->region[region_index].flowtype_num++; + + return 0; +} + +static void +i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + uint8_t hw_flowtype; + uint32_t pfqf_hregion; + uint16_t i, j, index; + struct i40e_queue_regions *info = &pf->queue_region; + + /* For the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. + */ + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].flowtype_num; j++) { + hw_flowtype = info->region[i].hw_flowtype[j]; + index = hw_flowtype >> 3; + pfqf_hregion = + i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index)); + + if ((hw_flowtype & 0x7) == 0) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_0_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT; + } else if ((hw_flowtype & 0x7) == 1) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_1_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT; + } else if ((hw_flowtype & 0x7) == 2) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_2_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT; + } else if ((hw_flowtype & 0x7) == 3) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_3_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT; + } else if ((hw_flowtype & 0x7) == 4) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_4_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT; + } else if ((hw_flowtype & 0x7) == 5) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_5_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT; + } else if ((hw_flowtype & 0x7) == 6) { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_6_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT; + } else { + pfqf_hregion |= info->region[i].region_id << + I40E_PFQF_HREGION_REGION_7_SHIFT; + pfqf_hregion |= 1 << + I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT; + } + + i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index), + pfqf_hregion); + } + } +} + +static int +i40e_queue_region_set_user_priority(struct i40e_pf *pf, + struct rte_pmd_i40e_queue_region_conf *rss_region_conf) +{ + struct i40e_queue_regions *info = &pf->queue_region; + int32_t ret = -EINVAL; + uint16_t i, j, region_index; + + if (rss_region_conf->user_priority >= 
I40E_MAX_USER_PRIORITY) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return ret; + } + + if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the region_id max index is 7"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) + if (rss_region_conf->region_id == info->region[i].region_id) + break; + + if (i == info->queue_region_number) { + PMD_DRV_LOG(ERR, "that region id has not been set before"); + ret = -EINVAL; + return ret; + } + + region_index = i; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + if (info->region[i].user_priority[j] == + rss_region_conf->user_priority) { + PMD_DRV_LOG(ERR, "that user priority has been set before"); + return 0; + } + } + } + + j = info->region[region_index].user_priority_num; + info->region[region_index].user_priority[j] = + rss_region_conf->user_priority; + info->region[region_index].user_priority_num++; + + return 0; +} + +static int +i40e_queue_region_dcb_configure(struct i40e_hw *hw, + struct i40e_pf *pf) +{ + struct i40e_dcbx_config dcb_cfg_local; + struct i40e_dcbx_config *dcb_cfg; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config; + int32_t ret = -EINVAL; + uint16_t i, j, prio_index, region_index; + uint8_t tc_map, tc_bw, bw_lf, dcb_flag = 0; + + if (!info->queue_region_number) { + PMD_DRV_LOG(ERR, "No queue region been set before"); + return ret; + } + + for (i = 0; i < info->queue_region_number; i++) { + if (info->region[i].user_priority_num) { + dcb_flag = 1; + break; + } + } + + if (dcb_flag == 0) + return 0; + + dcb_cfg = &dcb_cfg_local; + memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config)); + + /* assume each tc has the same bw */ + tc_bw = I40E_MAX_PERCENT / info->queue_region_number; + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tcbwtable[i] = tc_bw; + /* to ensure the sum of tcbw is equal to 100 */ + bw_lf = I40E_MAX_PERCENT % info->queue_region_number; + for (i = 0; i < bw_lf; i++) + dcb_cfg->etscfg.tcbwtable[i]++; + + /* assume each tc has the same Transmission Selection Algorithm */ + for (i = 0; i < info->queue_region_number; i++) + dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + + for (i = 0; i < info->queue_region_number; i++) { + for (j = 0; j < info->region[i].user_priority_num; j++) { + prio_index = info->region[i].user_priority[j]; + region_index = info->region[i].region_id; + dcb_cfg->etscfg.prioritytable[prio_index] = + region_index; + } + } + + /* FW needs one App to configure HW */ + dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM; + dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO; + dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t); + + dcb_cfg->pfc.willing = 0; + dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + dcb_cfg->pfc.pfcenable = tc_map; + + /* Copy the new config to the current config */ + *old_cfg = *dcb_cfg; + old_cfg->etsrec = old_cfg->etscfg; + ret = i40e_set_dcb_config(hw); + + if (ret) { + PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + + return 0; +} + +int +i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on) +{ + int32_t ret = -EINVAL; + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_vsi 
*main_vsi = pf->main_vsi; + + if (on) { + i40e_queue_region_pf_flowtype_conf(hw, pf); + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + return ret; + } + + ret = i40e_queue_region_dcb_configure(hw, pf); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + return ret; + } + + return 0; + } + + if (info->queue_region_number) { + info->queue_region_number = 1; + info->region[0].queue_num = main_vsi->nb_used_qps; + info->region[0].queue_start_index = 0; + + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + + i40e_init_queue_region_conf(dev); + } + return 0; +} + +static int +i40e_queue_region_pf_check_rss(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint64_t hena; + + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; + + if (!hena) + return -ENOTSUP; + + return 0; +} + +static int +i40e_queue_region_get_all_info(struct i40e_pf *pf, + struct i40e_queue_regions *regions_ptr) +{ + struct i40e_queue_regions *info = &pf->queue_region; + + rte_memcpy(regions_ptr, info, + sizeof(struct i40e_queue_regions)); + + return 0; +} + +int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id, + enum rte_pmd_i40e_queue_region_op op_type, void *arg) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (!(!i40e_queue_region_pf_check_rss(pf))) + return -ENOTSUP; + + /* This queue region feature only support pf by now. It should + * be called after dev_start, and will be clear after dev_stop. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON" + * is just an enable function which server for other configuration, + * it is for all configuration about queue region from up layer, + * at first will only keep in DPDK softwarestored in driver, + * only after "FLUSH_ON", it commit all configuration to HW. + * Because PMD had to set hardware configuration at a time, so + * it will record all up layer command at first. + * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" is + * just clean all configuration about queue region just now, + * and restore all to DPDK i40e driver default + * config when start up. 
+ */ + + switch (op_type) { + case RTE_PMD_I40E_RSS_QUEUE_REGION_SET: + ret = i40e_queue_region_set_region(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET: + ret = i40e_queue_region_set_flowtype(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET: + ret = i40e_queue_region_set_user_priority(pf, + (struct rte_pmd_i40e_queue_region_conf *)arg); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF: + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + break; + case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET: + ret = i40e_queue_region_get_all_info(pf, + (struct i40e_queue_regions *)arg); + break; + default: + PMD_DRV_LOG(WARNING, "op type (%d) not supported", + op_type); + ret = -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + + return ret; +} + +int rte_pmd_i40e_flow_add_del_packet_template( + uint16_t port, + const struct rte_pmd_i40e_pkt_template_conf *conf, + uint8_t add) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port]; + struct i40e_fdir_filter_conf filter_conf; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + memset(&filter_conf, 0, sizeof(filter_conf)); + filter_conf.soft_id = conf->soft_id; + filter_conf.input.flow.raw_flow.pctype = conf->input.pctype; + filter_conf.input.flow.raw_flow.packet = conf->input.packet; + filter_conf.input.flow.raw_flow.length = conf->input.length; + filter_conf.input.flow_ext.pkt_template = true; + + filter_conf.action.rx_queue = conf->action.rx_queue; + filter_conf.action.behavior = + (enum i40e_fdir_behavior)conf->action.behavior; + filter_conf.action.report_status = + (enum i40e_fdir_status)conf->action.report_status; + filter_conf.action.flex_off = conf->action.flex_off; + + return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add); +} + +int +rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + memset(inset, 0, sizeof(struct rte_pmd_i40e_inset)); + + switch (inset_type) { + case INSET_HASH: + /* Get input set */ + inset_reg = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + /* Get field mask */ + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype)); + break; + case INSET_FDIR: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype)); + break; + case INSET_FDIR_FLX: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1)); + 
break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + inset->inset = inset_reg; + + for (i = 0; i < 2; i++) { + inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F); + inset->mask[i].mask = mask_reg[i] & 0xFFFF; + } + + return 0; +} + +int +rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + struct i40e_pf *pf; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Input set configuration is not supported."); + return -ENOTSUP; + } + + inset_reg = inset->inset; + for (i = 0; i < 2; i++) + mask_reg[i] = (inset->mask[i].field_idx << 16) | + inset->mask[i].mask; + + switch (inset_type) { + case INSET_HASH: + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR: + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR_FLX: + i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i), + mask_reg[i]); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + return 0; +}
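
A minimal usage sketch of the queue region API this patch introduces (rte_pmd_i40e_rss_queue_region_conf and struct rte_pmd_i40e_queue_region_conf). It follows the staging model described in the comment above: region, flow type and user priority settings are only recorded in the driver until RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON commits them to hardware. This is an illustrative assumption-laden sketch, not part of the patch: it assumes a DPDK application where EAL is initialized and the port is already started with RSS enabled, and the port id, region id, queue range, PCTYPE index and user priority values are placeholders.

#include <string.h>
#include <stdint.h>
#include <rte_pmd_i40e.h>

/* Hypothetical helper: stage queues 0-7 of the given port as queue region 0,
 * bind one PCTYPE and one user priority to it, then flush the staged
 * configuration to hardware in one step.
 */
static int
setup_queue_region(uint16_t port_id)
{
	struct rte_pmd_i40e_queue_region_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.region_id = 0;
	conf.queue_start_index = 0;
	conf.queue_num = 8;	/* must be a power of two, at most 64 */
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_SET, &conf);
	if (ret)
		return ret;

	conf.hw_flowtype = 31;	/* illustrative PCTYPE index, must be < 64 */
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET, &conf);
	if (ret)
		return ret;

	conf.user_priority = 0;	/* must be below I40E_MAX_USER_PRIORITY */
	ret = rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET, &conf);
	if (ret)
		return ret;

	/* Nothing above touches the hardware; the flush commits it all. */
	return rte_pmd_i40e_rss_queue_region_conf(port_id,
			RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON, NULL);
}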