#include <rte_tailq.h>
#include "base/i40e_prototype.h"
+#include "base/i40e_dcb.h"
#include "i40e_ethdev.h"
#include "i40e_pf.h"
+#include "i40e_rxtx.h"
#include "rte_pmd_i40e.h"
-/* The max bandwidth of i40e is 40Gbps. */
-#define I40E_QOS_BW_MAX 40000
-/* The bandwidth should be the multiple of 50Mbps. */
-#define I40E_QOS_BW_GRANULARITY 50
-/* The min bandwidth weight is 1. */
-#define I40E_QOS_BW_WEIGHT_MIN 1
-/* The max bandwidth weight is 127. */
-#define I40E_QOS_BW_WEIGHT_MAX 127
-
int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
}
int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
for (i = 0; i < vlan_num; i++) {
mv_f[i].filter_type = f->mac_info.filter_type;
- (void)rte_memcpy(&mv_f[i].macaddr,
+ rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr,
ETH_ADDR_LEN);
}
vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
}
int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr)
{
struct i40e_mac_filter *f;
/* Remove all existing mac */
TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
- i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+ if (i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr)
+ != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Delete MAC failed");
return 0;
}
/* Set vlan strip on/off for specific VF from host */
int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
uint16_t vlan_id)
{
struct rte_eth_dev *dev;
vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
return ret;
}
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
uint8_t on)
{
struct rte_eth_dev *dev;
}
if (on) {
- (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
ret = i40e_vsi_add_mac(vsi, &filter);
} else {
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
memset(&ctxt, 0, sizeof(ctxt));
- (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
ctxt.seid = vsi->seid;
hw = I40E_VSI_TO_HW(vsi);
return count;
}
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
uint64_t vf_mask, uint8_t on)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
+rte_pmd_i40e_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats)
{
}
int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
+rte_pmd_i40e_reset_vf_stats(uint16_t port,
uint16_t vf_id)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
uint8_t tc_num, uint8_t *bw_weight)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
uint8_t tc_no, uint32_t bw)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
return ret;
}
-#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_PROFILE_INFO_SIZE sizeof(struct rte_pmd_i40e_profile_info)
#define I40E_MAX_PROFILE_NUM 16
static void
return status;
}
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-
/* Check if the profile info exists */
static int
-i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
{
struct rte_eth_dev *dev = &rte_eth_devices[port];
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
sizeof(struct i40e_profile_section_header));
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
- if ((pinfo->track_id == p->track_id) &&
- !memcmp(&pinfo->version, &p->version,
- sizeof(struct i40e_ddp_version)) &&
- !memcmp(&pinfo->name, &p->name,
- I40E_DDP_NAME_SIZE)) {
+ if (pinfo->track_id == p->track_id) {
PMD_DRV_LOG(INFO, "Profile exists.");
rte_free(buff);
return 1;
}
int
-rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op)
{
int is_exist;
enum i40e_status_code status = I40E_SUCCESS;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_ONLY &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ return -ENOTSUP;
+ }
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
dev = &rte_eth_devices[port];
return -EINVAL;
}
+ i40e_update_customized_info(dev, buff, size);
+
/* Find metadata segment */
metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
pkg_hdr);
return -EINVAL;
}
track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ PMD_DRV_LOG(ERR, "Invalid track_id");
+ return -EINVAL;
+ }
/* Find profile segment */
profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
return -EINVAL;
}
+	/* Check if the profile is already loaded */
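+	/* the last argument marks the generated info section as an
+	 * "add" (vs "remove") operation
+	 */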
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec,
+ op == RTE_PMD_I40E_PKG_OP_WR_ADD);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
- /* Check if the profile exists */
- i40e_generate_profile_info_sec(
- ((struct i40e_profile_segment *)profile_seg_hdr)->name,
- &((struct i40e_profile_segment *)profile_seg_hdr)->version,
- track_id, profile_info_sec, 1);
- is_exist = i40e_check_profile_info(port, profile_info_sec);
- if (is_exist > 0) {
+ if (is_exist) {
PMD_DRV_LOG(ERR, "Profile already exists.");
rte_free(profile_info_sec);
- return 1;
- } else if (is_exist < 0) {
- PMD_DRV_LOG(ERR, "Failed to check profile.");
+ return -EEXIST;
+ }
+ } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ if (!is_exist) {
+ PMD_DRV_LOG(ERR, "Profile does not exist.");
rte_free(profile_info_sec);
- return -EINVAL;
+ return -EACCES;
}
+ }
- /* Write profile to HW */
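+	/* For deletion, roll the profile back instead of writing it */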
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ status = i40e_rollback_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile for delete.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+ } else {
status = i40e_write_profile(
- hw,
- (struct i40e_profile_segment *)profile_seg_hdr,
- track_id);
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
if (status) {
- PMD_DRV_LOG(ERR, "Failed to write profile.");
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to write profile for add.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
rte_free(profile_info_sec);
return status;
}
+ }
- /* Add profile info to info list */
+ if (track_id && (op != RTE_PMD_I40E_PKG_OP_WR_ONLY)) {
+ /* Modify loaded profiles info list */
status = i40e_add_rm_profile_info(hw, profile_info_sec);
- if (status)
- PMD_DRV_LOG(ERR, "Failed to add profile info.");
- } else {
- PMD_DRV_LOG(ERR, "Operation not supported.");
+ if (status) {
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ PMD_DRV_LOG(ERR, "Failed to add profile to info list.");
+ else
+ PMD_DRV_LOG(ERR, "Failed to delete profile from info list.");
+ }
}
rte_free(profile_info_sec);
return status;
}
+/* Get the number of TLV records in the section */
+static unsigned int
+i40e_get_tlv_section_size(struct i40e_profile_section_header *sec)
+{
+ unsigned int i, nb_rec, nb_tlv = 0;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ if (!sec)
+ return nb_tlv;
+
+ /* get number of records in the section */
+ nb_rec = sec->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = 0; i < nb_rec; ) {
+ tlv = (struct i40e_profile_tlv_section_record *)&sec[1 + i];
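+		/* tlv->len counts whole section records, so this step
+		 * advances past the entire TLV record
+		 */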
+ i += tlv->len;
+ nb_tlv++;
+ }
+ return nb_tlv;
+}
+
+int rte_pmd_i40e_get_ddp_info(uint8_t *pkg_buff, uint32_t pkg_size,
+ uint8_t *info_buff, uint32_t info_size,
+ enum rte_pmd_i40e_package_info type)
+{
+ uint32_t ret_size;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *i40e_seg_hdr;
+ struct i40e_generic_seg_header *note_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+
+ if (!info_buff) {
+ PMD_DRV_LOG(ERR, "Output info buff is invalid.");
+ return -EINVAL;
+ }
+
+ if (!pkg_buff || pkg_size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Package buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)pkg_buff;
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+
+ /* Find global notes segment */
+ note_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_NOTES,
+ pkg_hdr);
+
+ /* Find i40e profile segment */
+ i40e_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+
+ /* get global header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_metadata_segment *)metadata_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get global note size */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ if (note_seg_hdr == NULL)
+ ret_size = 0;
+ else
+ ret_size = note_seg_hdr->size;
+ *(uint32_t *)info_buff = ret_size;
+ return I40E_SUCCESS;
+ }
+
+ /* get global note */
+ if (type == RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES) {
+ if (note_seg_hdr == NULL)
+ return -ENOTSUP;
+ if (info_size < note_seg_hdr->size) {
+ PMD_DRV_LOG(ERR, "Information buffer size is too small");
+ return -EINVAL;
+ }
+		memcpy(info_buff, &note_seg_hdr[1], note_seg_hdr->size);
+ return I40E_SUCCESS;
+ }
+
+ /* get i40e segment header info */
+ if (type == RTE_PMD_I40E_PKG_INFO_HEADER) {
+ struct rte_pmd_i40e_profile_info *info =
+ (struct rte_pmd_i40e_profile_info *)info_buff;
+
+ if (info_size < sizeof(struct rte_pmd_i40e_profile_info)) {
+ PMD_DRV_LOG(ERR, "Output info buff size is invalid.");
+ return -EINVAL;
+ }
+
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+
+ if (!i40e_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find i40e segment header");
+ return -EINVAL;
+ }
+
+ memset(info, 0, sizeof(struct rte_pmd_i40e_profile_info));
+ info->owner = RTE_PMD_I40E_DDP_OWNER_UNKNOWN;
+ info->track_id =
+ ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ memcpy(info->name,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->name,
+ I40E_DDP_NAME_SIZE);
+ memcpy(&info->version,
+ &((struct i40e_profile_segment *)i40e_seg_hdr)->version,
+ sizeof(struct i40e_ddp_version));
+ return I40E_SUCCESS;
+ }
+
+ /* get number of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_NUM) {
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ *(uint32_t *)info_buff =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ return I40E_SUCCESS;
+ }
+
+ /* get list of devices */
+ if (type == RTE_PMD_I40E_PKG_INFO_DEVID_LIST) {
+ uint32_t dev_num;
+ dev_num =
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table_count;
+ if (info_size < sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ memcpy(info_buff,
+ ((struct i40e_profile_segment *)i40e_seg_hdr)->device_table,
+ sizeof(struct rte_pmd_i40e_ddp_device_id) * dev_num);
+ return I40E_SUCCESS;
+ }
+
+ /* get number of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM) {
+ struct i40e_profile_section_header *proto;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(proto);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of protocols */
+ if (type == RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_proto_info *pinfo;
+ struct i40e_profile_section_header *proto;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_proto_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_proto_info);
+ for (i = 0; i < nb_proto_info; i++) {
+ pinfo[i].proto_id = RTE_PMD_I40E_PROTO_UNUSED;
+ memset(pinfo[i].name, 0, RTE_PMD_I40E_DDP_NAME_SIZE);
+ }
+ proto = i40e_find_section_in_profile(SECTION_TYPE_PROTO,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(proto);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = proto->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&proto[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ pinfo[j].proto_id = tlv->data[0];
+ snprintf(pinfo[j].name, I40E_DDP_NAME_SIZE, "%s",
+ (const char *)&tlv->data[1]);
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM) {
+ struct i40e_profile_section_header *pctype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(pctype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet classification types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *pctype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ pctype = i40e_find_section_in_profile(SECTION_TYPE_PCTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(pctype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+
+ /* get number of records in the section */
+ nb_rec = pctype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ tlv = (struct i40e_profile_tlv_section_record *)&pctype[1];
+ for (i = j = 0; i < nb_rec; j++) {
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ tlv = &tlv[tlv->len];
+ }
+ return I40E_SUCCESS;
+ }
+
+ /* get number of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_NUM) {
+ struct i40e_profile_section_header *ptype;
+
+ if (info_size < sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ *(uint32_t *)info_buff = i40e_get_tlv_section_size(ptype);
+ return I40E_SUCCESS;
+ }
+
+ /* get list of packet types */
+ if (type == RTE_PMD_I40E_PKG_INFO_PTYPE_LIST) {
+ uint32_t i, j, nb_tlv, nb_rec, nb_proto_info;
+ struct rte_pmd_i40e_ptype_info *pinfo;
+ struct i40e_profile_section_header *ptype;
+ struct i40e_profile_tlv_section_record *tlv;
+
+ pinfo = (struct rte_pmd_i40e_ptype_info *)info_buff;
+ nb_proto_info = info_size /
+ sizeof(struct rte_pmd_i40e_ptype_info);
+ for (i = 0; i < nb_proto_info; i++)
+ memset(&pinfo[i], RTE_PMD_I40E_PROTO_UNUSED,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ ptype = i40e_find_section_in_profile(SECTION_TYPE_PTYPE,
+ (struct i40e_profile_segment *)i40e_seg_hdr);
+ nb_tlv = i40e_get_tlv_section_size(ptype);
+ if (nb_tlv == 0)
+ return I40E_SUCCESS;
+ if (nb_proto_info < nb_tlv) {
+ PMD_DRV_LOG(ERR, "Invalid information buffer size");
+ return -EINVAL;
+ }
+ /* get number of records in the section */
+ nb_rec = ptype->section.size /
+ sizeof(struct i40e_profile_tlv_section_record);
+ for (i = j = 0; i < nb_rec; j++) {
+ tlv = (struct i40e_profile_tlv_section_record *)
+ &ptype[1 + i];
+ memcpy(&pinfo[j], tlv->data,
+ sizeof(struct rte_pmd_i40e_ptype_info));
+ i += tlv->len;
+ }
+ return I40E_SUCCESS;
+ }
+
+ PMD_DRV_LOG(ERR, "Info type %u is invalid.", type);
+ return -EINVAL;
+}
+
int
-rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
return status;
}
+
+static int check_invalid_pkt_type(uint32_t pkt_type)
+{
+ uint32_t l2, l3, l4, tnl, il2, il3, il4;
+
+ l2 = pkt_type & RTE_PTYPE_L2_MASK;
+ l3 = pkt_type & RTE_PTYPE_L3_MASK;
+ l4 = pkt_type & RTE_PTYPE_L4_MASK;
+ tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
+ il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
+ il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
+ il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
+
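+	/* a value of 0 for a layer means "not present"; any non-zero value
+	 * must be one of the ptypes i40e can report for that layer
+	 */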
+ if (l2 &&
+ l2 != RTE_PTYPE_L2_ETHER &&
+ l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
+ l2 != RTE_PTYPE_L2_ETHER_ARP &&
+ l2 != RTE_PTYPE_L2_ETHER_LLDP &&
+ l2 != RTE_PTYPE_L2_ETHER_NSH &&
+ l2 != RTE_PTYPE_L2_ETHER_VLAN &&
+ l2 != RTE_PTYPE_L2_ETHER_QINQ)
+ return -1;
+
+ if (l3 &&
+ l3 != RTE_PTYPE_L3_IPV4 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (l4 &&
+ l4 != RTE_PTYPE_L4_TCP &&
+ l4 != RTE_PTYPE_L4_UDP &&
+ l4 != RTE_PTYPE_L4_FRAG &&
+ l4 != RTE_PTYPE_L4_SCTP &&
+ l4 != RTE_PTYPE_L4_ICMP &&
+ l4 != RTE_PTYPE_L4_NONFRAG)
+ return -1;
+
+ if (tnl &&
+ tnl != RTE_PTYPE_TUNNEL_IP &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_VXLAN &&
+ tnl != RTE_PTYPE_TUNNEL_NVGRE &&
+ tnl != RTE_PTYPE_TUNNEL_GENEVE &&
+ tnl != RTE_PTYPE_TUNNEL_GTPC &&
+ tnl != RTE_PTYPE_TUNNEL_GTPU)
+ return -1;
+
+ if (il2 &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
+ return -1;
+
+ if (il3 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (il4 &&
+ il4 != RTE_PTYPE_INNER_L4_TCP &&
+ il4 != RTE_PTYPE_INNER_L4_UDP &&
+ il4 != RTE_PTYPE_INNER_L4_FRAG &&
+ il4 != RTE_PTYPE_INNER_L4_SCTP &&
+ il4 != RTE_PTYPE_INNER_L4_ICMP &&
+ il4 != RTE_PTYPE_INNER_L4_NONFRAG)
+ return -1;
+
+ return 0;
+}
+
+static int check_invalid_ptype_mapping(
+ struct rte_pmd_i40e_ptype_mapping *mapping_table,
+ uint16_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uint16_t ptype = mapping_table[i].hw_ptype;
+ uint32_t pkt_type = mapping_table[i].sw_ptype;
+
+ if (ptype >= I40E_MAX_PKT_TYPE)
+ return -1;
+
+ if (pkt_type == RTE_PTYPE_UNKNOWN)
+ continue;
+
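+		/* user-defined software ptypes bypass the per-layer check */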
+ if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
+ continue;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ptype_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_MAX_PKT_TYPE)
+ return -EINVAL;
+
+ if (check_invalid_ptype_mapping(mapping_items, count))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
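+		/* wipe the whole table first so only the new mappings remain */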
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
+ }
+
+ for (i = 0; i < count; i++)
+ ad->ptype_tbl[mapping_items[i].hw_ptype]
+ = mapping_items[i].sw_ptype;
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_ptype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int n = 0;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (n >= size)
+ break;
+ if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
+ continue;
+ mapping_items[n].hw_ptype = i;
+ mapping_items[n].sw_ptype = ad->ptype_tbl[i];
+ n++;
+ }
+
+ *count = n;
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!mask && check_invalid_pkt_type(target))
+ return -EINVAL;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (mask) {
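+			/* replace entries whose ptype bits form a non-empty
+			 * subset of target
+			 */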
+ if ((target | ad->ptype_tbl[i]) == target &&
+ (target & ad->ptype_tbl[i]))
+ ad->ptype_tbl[i] = pkt_type;
+ } else {
+ if (ad->ptype_tbl[i] == target)
+ ad->ptype_tbl[i] = pkt_type;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_add_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ struct i40e_mac_filter_info mac_filter;
+ int ret;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ether_addr_copy(mac_addr, &mac_filter.mac_addr);
+ ret = i40e_vsi_add_mac(vsi, &mac_filter);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
+ return -1;
+ }
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_reset(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_pctype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_flow_type_mapping_get(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ mapping_items[i].flow_type = i;
+ mapping_items[i].pctype = ad->pctypes_tbl[i];
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_flow_type_mapping_update(
+ uint16_t port,
+ struct rte_pmd_i40e_flow_type_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_FLOW_TYPE_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++)
+ if (mapping_items[i].flow_type >= I40E_FLOW_TYPE_MAX ||
+ mapping_items[i].flow_type == RTE_ETH_FLOW_UNKNOWN ||
+ (mapping_items[i].pctype &
+ (1ULL << I40E_FILTER_PCTYPE_INVALID)))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ }
+
+ for (i = 0; i < count; i++) {
+ ad->pctypes_tbl[mapping_items[i].flow_type] =
+ mapping_items[i].pctype;
+ if (mapping_items[i].pctype)
+ ad->flow_types_mask |=
+ (1ULL << mapping_items[i].flow_type);
+ else
+ ad->flow_types_mask &=
+ ~(1ULL << mapping_items[i].flow_type);
+ }
+
+ for (i = 0, ad->pctypes_mask = 0ULL; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_query_vfid_by_mac(uint16_t port, const struct ether_addr *vf_mac)
+{
+ struct rte_eth_dev *dev;
+ struct ether_addr *mac;
+ struct i40e_pf *pf;
+ int vf_id;
+ struct i40e_pf_vf *vf;
+ uint16_t vf_num;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ vf_num = pf->vf_num;
+
+ for (vf_id = 0; vf_id < vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ mac = &vf->mac_addr;
+
+ if (is_same_ether_addr(mac, vf_mac))
+ return vf_id;
+ }
+
+ return -EINVAL;
+}
+
+static int
+i40e_vsi_update_queue_region_mapping(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint16_t i;
+ struct i40e_vsi *vsi = pf->main_vsi;
+ uint16_t queue_offset, bsf, tc_index;
+ struct i40e_vsi_context ctxt;
+ struct i40e_aqc_vsi_properties_data *vsi_info;
+ struct i40e_queue_regions *region_info =
+ &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!region_info->queue_region_number) {
+ PMD_INIT_LOG(ERR, "there is no that region id been set before");
+ return ret;
+ }
+
+ memset(&ctxt, 0, sizeof(struct i40e_vsi_context));
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.info = vsi->info;
+ vsi_info = &ctxt.info;
+
+ memset(vsi_info->tc_mapping, 0, sizeof(uint16_t) * 8);
+ memset(vsi_info->queue_mapping, 0, sizeof(uint16_t) * 16);
+
+ /* Configure queue region and queue mapping parameters,
+ * for enabled queue region, allocate queues to this region.
+ */
+
+ for (i = 0; i < region_info->queue_region_number; i++) {
+ tc_index = region_info->region[i].region_id;
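+		/* queue_num is a power of 2, so bsf is log2 of the queue count */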
+ bsf = rte_bsf32(region_info->region[i].queue_num);
+ queue_offset = region_info->region[i].queue_start_index;
+ vsi_info->tc_mapping[tc_index] = rte_cpu_to_le_16(
+ (queue_offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+ }
+
+	/* Associate queue number with VSI; keep vsi->nb_qps unchanged */
+ vsi_info->mapping_flags |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ vsi_info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
+ vsi_info->valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure queue region mapping = %d ",
+ hw->aq.asq_last_status);
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+ rte_memcpy(&vsi->info.queue_mapping,
+ &ctxt.info.queue_mapping,
+ sizeof(vsi->info.queue_mapping));
+ vsi->info.mapping_flags = ctxt.info.mapping_flags;
+ vsi->info.valid_sections = 0;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_set_region(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *conf_ptr)
+{
+ uint16_t i;
+ struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+
+ if (!((rte_is_power_of_2(conf_ptr->queue_num)) &&
+ conf_ptr->queue_num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return ret;
+ }
+
+ if (conf_ptr->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if ((conf_ptr->queue_start_index + conf_ptr->queue_num)
+ > main_vsi->nb_used_qps) {
+ PMD_DRV_LOG(ERR, "the queue index exceeds the VSI range");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (conf_ptr->region_id == info->region[i].region_id)
+ break;
+
+ if (i == info->queue_region_number &&
+ i <= I40E_REGION_MAX_INDEX) {
+ info->region[i].region_id = conf_ptr->region_id;
+ info->region[i].queue_num = conf_ptr->queue_num;
+ info->region[i].queue_start_index =
+ conf_ptr->queue_start_index;
+ info->queue_region_number++;
+ } else {
+ PMD_DRV_LOG(ERR, "queue region number exceeds maxnum 8 or the queue region id has been set before");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i40e_queue_region_set_flowtype(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ uint16_t i, j;
+ uint16_t region_index, flowtype_index;
+
+ /* For the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+
+ if (rss_region_conf->region_id > I40E_PFQF_HREGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->hw_flowtype >= I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+	if (i == info->queue_region_number) {
+		PMD_DRV_LOG(ERR, "this region id has not been set");
+		return ret;
+	}
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ if (rss_region_conf->hw_flowtype ==
+ info->region[i].hw_flowtype[j]) {
+ PMD_DRV_LOG(ERR, "that hw_flowtype has been set before");
+ return 0;
+ }
+ }
+ }
+
+ flowtype_index = info->region[region_index].flowtype_num;
+ info->region[region_index].hw_flowtype[flowtype_index] =
+ rss_region_conf->hw_flowtype;
+ info->region[region_index].flowtype_num++;
+
+ return 0;
+}
+
+static void
+i40e_queue_region_pf_flowtype_conf(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ uint8_t hw_flowtype;
+ uint32_t pfqf_hregion;
+ uint16_t i, j, index;
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ /* For the pctype or hardware flowtype of packet,
+ * the specific index for each type has been defined
+ * in file i40e_type.h as enum i40e_filter_pctype.
+ */
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].flowtype_num; j++) {
+ hw_flowtype = info->region[i].hw_flowtype[j];
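+			/* each PFQF_HREGION register packs 8 flow types:
+			 * pctype >> 3 picks the register, pctype & 0x7 the slot
+			 */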
+ index = hw_flowtype >> 3;
+ pfqf_hregion =
+ i40e_read_rx_ctl(hw, I40E_PFQF_HREGION(index));
+
+ if ((hw_flowtype & 0x7) == 0) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_0_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 1) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_1_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 2) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_2_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 3) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_3_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 4) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_4_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 5) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_5_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT;
+ } else if ((hw_flowtype & 0x7) == 6) {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_6_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT;
+ } else {
+ pfqf_hregion |= info->region[i].region_id <<
+ I40E_PFQF_HREGION_REGION_7_SHIFT;
+ pfqf_hregion |= 1 <<
+ I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT;
+ }
+
+ i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(index),
+ pfqf_hregion);
+ }
+ }
+}
+
+static int
+i40e_queue_region_set_user_priority(struct i40e_pf *pf,
+ struct rte_pmd_i40e_queue_region_conf *rss_region_conf)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, region_index;
+
+ if (rss_region_conf->user_priority >= I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return ret;
+ }
+
+ if (rss_region_conf->region_id > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the region_id max index is 7");
+ return ret;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++)
+ if (rss_region_conf->region_id == info->region[i].region_id)
+ break;
+
+	if (i == info->queue_region_number) {
+		PMD_DRV_LOG(ERR, "this region id has not been set");
+		return ret;
+	}
+
+ region_index = i;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ if (info->region[i].user_priority[j] ==
+ rss_region_conf->user_priority) {
+ PMD_DRV_LOG(ERR, "that user priority has been set before");
+ return 0;
+ }
+ }
+ }
+
+ j = info->region[region_index].user_priority_num;
+ info->region[region_index].user_priority[j] =
+ rss_region_conf->user_priority;
+ info->region[region_index].user_priority_num++;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_dcb_configure(struct i40e_hw *hw,
+ struct i40e_pf *pf)
+{
+ struct i40e_dcbx_config dcb_cfg_local;
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
+ int32_t ret = -EINVAL;
+ uint16_t i, j, prio_index, region_index;
+ uint8_t tc_map, tc_bw, bw_lf;
+
+ if (!info->queue_region_number) {
+ PMD_DRV_LOG(ERR, "No queue region been set before");
+ return ret;
+ }
+
+ dcb_cfg = &dcb_cfg_local;
+ memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
+
+ /* assume each tc has the same bw */
+ tc_bw = I40E_MAX_PERCENT / info->queue_region_number;
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
+ /* to ensure the sum of tcbw is equal to 100 */
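+	/* e.g. 3 regions: 33 + 33 + 33 = 99, the leftover 1% goes to TC 0 */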
+ bw_lf = I40E_MAX_PERCENT % info->queue_region_number;
+ for (i = 0; i < bw_lf; i++)
+ dcb_cfg->etscfg.tcbwtable[i]++;
+
+ /* assume each tc has the same Transmission Selection Algorithm */
+ for (i = 0; i < info->queue_region_number; i++)
+ dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ for (j = 0; j < info->region[i].user_priority_num; j++) {
+ prio_index = info->region[i].user_priority[j];
+ region_index = info->region[i].region_id;
+ dcb_cfg->etscfg.prioritytable[prio_index] =
+ region_index;
+ }
+ }
+
+ /* FW needs one App to configure HW */
+ dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
+ dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
+ dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
+ dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ tc_map = RTE_LEN2MASK(info->queue_region_number, uint8_t);
+
+ dcb_cfg->pfc.willing = 0;
+ dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->pfc.pfcenable = tc_map;
+
+ /* Copy the new config to the current config */
+ *old_cfg = *dcb_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(hw);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Set queue region DCB Config failed, err %s aq_err %s",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
+ struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on)
+{
+ int32_t ret = -EINVAL;
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ if (on) {
+ i40e_queue_region_pf_flowtype_conf(hw, pf);
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+ return ret;
+ }
+
+ ret = i40e_queue_region_dcb_configure(hw, pf);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ return ret;
+ }
+
+ return 0;
+ }
+
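+	/* flush off: restore one default region covering queues 0..63 */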
+ info->queue_region_number = 1;
+ info->region[0].queue_num = 64;
+ info->region[0].queue_start_index = 0;
+
+ ret = i40e_vsi_update_queue_region_mapping(hw, pf);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(INFO, "Failed to flush queue region mapping.");
+
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to flush dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+
+ i40e_init_queue_region_conf(dev);
+
+ return 0;
+}
+
+static int
+i40e_queue_region_pf_check_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint64_t hena;
+
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
+
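+	/* queue regions rely on RSS; require at least one hash-enable bit */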
+ if (!hena)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static int
+i40e_queue_region_get_all_info(struct i40e_pf *pf,
+ struct i40e_queue_regions *regions_ptr)
+{
+ struct i40e_queue_regions *info = &pf->queue_region;
+
+ rte_memcpy(regions_ptr, info,
+ sizeof(struct i40e_queue_regions));
+
+ return 0;
+}
+
+int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id,
+ enum rte_pmd_i40e_queue_region_op op_type, void *arg)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_pf *pf;
+	struct i40e_hw *hw;
+	int32_t ret;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+	dev = &rte_eth_devices[port_id];
+	pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+	if (i40e_queue_region_pf_check_rss(pf))
+ return -ENOTSUP;
+
+	/* The queue region feature only supports the PF for now. It should
+	 * be called after dev_start, and is cleared on dev_stop.
+	 * Queue region configuration from the upper layer is first stored
+	 * only in the driver; because the PMD must program the hardware in
+	 * one pass, it records all upper-layer commands and commits them to
+	 * HW only on "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON".
+	 * "RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF" clears the whole
+	 * queue region configuration and restores the i40e driver defaults
+	 * from start-up.
+	 */
+
+ switch (op_type) {
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_SET:
+ ret = i40e_queue_region_set_region(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET:
+ ret = i40e_queue_region_set_flowtype(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET:
+ ret = i40e_queue_region_set_user_priority(pf,
+ (struct rte_pmd_i40e_queue_region_conf *)arg);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF:
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+ break;
+ case RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET:
+ ret = i40e_queue_region_get_all_info(pf,
+ (struct i40e_queue_regions *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "op type (%d) not supported",
+ op_type);
+ ret = -EINVAL;
+ }
+
+ I40E_WRITE_FLUSH(hw);
+
+ return ret;
+}
+
+int rte_pmd_i40e_flow_add_del_packet_template(
+ uint16_t port,
+ const struct rte_pmd_i40e_pkt_template_conf *conf,
+ uint8_t add)
+{
+	struct rte_eth_dev *dev;
+	struct i40e_fdir_filter_conf filter_conf;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
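+	/* translate the packet template into a raw-flow flow director filter */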
+ memset(&filter_conf, 0, sizeof(filter_conf));
+ filter_conf.soft_id = conf->soft_id;
+ filter_conf.input.flow.raw_flow.pctype = conf->input.pctype;
+ filter_conf.input.flow.raw_flow.packet = conf->input.packet;
+ filter_conf.input.flow.raw_flow.length = conf->input.length;
+ filter_conf.input.flow_ext.pkt_template = true;
+
+ filter_conf.action.rx_queue = conf->action.rx_queue;
+ filter_conf.action.behavior =
+ (enum i40e_fdir_behavior)conf->action.behavior;
+ filter_conf.action.report_status =
+ (enum i40e_fdir_status)conf->action.report_status;
+ filter_conf.action.flex_off = conf->action.flex_off;
+
+ return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add);
+}