[ixgbe] (@ref rte_pmd_ixgbe.h),
[i40e] (@ref rte_pmd_i40e.h),
[ice] (@ref rte_pmd_ice.h),
+ [iavf] (@ref rte_pmd_iavf.h),
[ioat] (@ref rte_ioat_rawdev.h),
[bnxt] (@ref rte_pmd_bnxt.h),
[dpaa] (@ref rte_pmd_dpaa.h),
@TOPDIR@/drivers/net/dpaa \
@TOPDIR@/drivers/net/dpaa2 \
@TOPDIR@/drivers/net/i40e \
+ @TOPDIR@/drivers/net/iavf \
@TOPDIR@/drivers/net/ice \
@TOPDIR@/drivers/net/ixgbe \
@TOPDIR@/drivers/net/mlx5 \
assignment in the hypervisor. Taking QEMU as an example, the device assignment should carry the IAVF device ID (0x1889) like
``-device vfio-pci,x-pci-device-id=0x1889,host=03:0a.0``.
+ When the IAVF is backed by an Intel® E810 device, the "Protocol Extraction" feature, which is supported by the ice PMD,
+ is also available for the IAVF PMD. The same devargs with the same parameters can be applied to the IAVF PMD; for
+ details, please refer to the section ``Protocol extraction for per queue`` of ice.rst.
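+
+ For example, a minimal devargs sketch (the PCI address and queue IDs are purely illustrative)::
+
+    -a 18:01.0,proto_xtr='[(1,2-3):tcp,4:ipv6]'
+
+ With this setting, queues 1-3 use the TCP extraction profile, queue 4 uses the IPv6 profile, and all other
+ queues perform no extraction.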
+
The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* **Updated Intel iavf driver.**
+ Updated iavf PMD with new features and improvements, including:
+
+ * Added support for flexible descriptor metadata extraction.
* Added support of AVX512 instructions in Rx and Tx path.
* **Updated Intel ice driver.**
struct virtchnl_vf_resource *vf_res; /* VF resource */
struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
uint64_t supported_rxdid;
-
+ uint8_t *proto_xtr; /* proto xtr type for all queues */
volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
uint32_t cmd_retval; /* return value of the cmd response from PF */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
#define IAVF_MAX_PKT_TYPE 1024
+#define IAVF_MAX_QUEUE_NUM 2048
+
+enum iavf_proto_xtr_type {
+ IAVF_PROTO_XTR_NONE,
+ IAVF_PROTO_XTR_VLAN,
+ IAVF_PROTO_XTR_IPV4,
+ IAVF_PROTO_XTR_IPV6,
+ IAVF_PROTO_XTR_IPV6_FLOW,
+ IAVF_PROTO_XTR_TCP,
+ IAVF_PROTO_XTR_IP_OFFSET,
+ IAVF_PROTO_XTR_MAX,
+};
+
+/**
+ * Cache devargs parse result.
+ */
+struct iavf_devargs {
+ uint8_t proto_xtr_dflt;
+ uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
+};
+
/* Structure to store private data for each VF instance. */
struct iavf_adapter {
struct iavf_hw hw;
const uint32_t *ptype_tbl;
bool stopped;
uint16_t fdir_ref_cnt;
+ struct iavf_devargs devargs;
};
/* IAVF_DEV_PRIVATE_TO */
#include "iavf.h"
#include "iavf_rxtx.h"
#include "iavf_generic_flow.h"
+#include "rte_pmd_iavf.h"
+
+/* devargs */
+#define IAVF_PROTO_XTR_ARG "proto_xtr"
+
+static const char * const iavf_valid_args[] = {
+ IAVF_PROTO_XTR_ARG,
+ NULL
+};
+
+static const struct rte_mbuf_dynfield iavf_proto_xtr_metadata_param = {
+ .name = "intel_pmd_dynfield_proto_xtr_metadata",
+ .size = sizeof(uint32_t),
+ .align = __alignof__(uint32_t),
+ .flags = 0,
+};
+
+struct iavf_proto_xtr_ol {
+ const struct rte_mbuf_dynflag param;
+ uint64_t *ol_flag;
+ bool required;
+};
+
+static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
+ [IAVF_PROTO_XTR_VLAN] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_vlan" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_vlan_mask },
+ [IAVF_PROTO_XTR_IPV4] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv4" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask },
+ [IAVF_PROTO_XTR_IPV6] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask },
+ [IAVF_PROTO_XTR_IPV6_FLOW] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_ipv6_flow" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask },
+ [IAVF_PROTO_XTR_TCP] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_tcp" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_tcp_mask },
+ [IAVF_PROTO_XTR_IP_OFFSET] = {
+ .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
+ .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+};
static int iavf_dev_configure(struct rte_eth_dev *dev);
static int iavf_dev_start(struct rte_eth_dev *dev);
return 0;
}
+static int
+iavf_lookup_proto_xtr_type(const char *flex_name)
+{
+ static struct {
+ const char *name;
+ enum iavf_proto_xtr_type type;
+ } xtr_type_map[] = {
+ { "vlan", IAVF_PROTO_XTR_VLAN },
+ { "ipv4", IAVF_PROTO_XTR_IPV4 },
+ { "ipv6", IAVF_PROTO_XTR_IPV6 },
+ { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
+ { "tcp", IAVF_PROTO_XTR_TCP },
+ { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+ };
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
+ if (strcmp(flex_name, xtr_type_map[i].name) == 0)
+ return xtr_type_map[i].type;
+ }
+
+ PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
+ "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+
+ return -1;
+}
+
+/**
+ * Parse an element; an element can be a single number, a single range,
+ * or a '(' ')' group:
+ * 1) A single number element is just a digit string, e.g. 9
+ * 2) A single range element is two numbers joined by '-', e.g. 2-6
+ * 3) A group element combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
+ * Within a group element, '-' is used as the range separator and
+ * ',' separates single numbers.
+ */
+static int
+iavf_parse_queue_set(const char *input, int xtr_type,
+ struct iavf_devargs *devargs)
+{
+ const char *str = input;
+ char *end = NULL;
+ uint32_t min, max;
+ uint32_t idx;
+
+ while (isblank(*str))
+ str++;
+
+ if (!isdigit(*str) && *str != '(')
+ return -1;
+
+ /* process single number or single range of number */
+ if (*str != '(') {
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
+ return -1;
+
+ while (isblank(*end))
+ end++;
+
+ min = idx;
+ max = idx;
+
+ /* process single <number>-<number> */
+ if (*end == '-') {
+ end++;
+ while (isblank(*end))
+ end++;
+ if (!isdigit(*end))
+ return -1;
+
+ errno = 0;
+ idx = strtoul(end, &end, 10);
+ if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
+ return -1;
+
+ max = idx;
+ while (isblank(*end))
+ end++;
+ }
+
+ if (*end != ':')
+ return -1;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ devargs->proto_xtr[idx] = xtr_type;
+
+ return 0;
+ }
+
+ /* process set within bracket */
+ str++;
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = IAVF_MAX_QUEUE_NUM;
+ do {
+ /* go ahead to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || !end || idx >= IAVF_MAX_QUEUE_NUM)
+ return -1;
+
+ /* go ahead to separator '-',',' and ')' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == IAVF_MAX_QUEUE_NUM)
+ min = idx;
+ else /* avoid continuous '-' */
+ return -1;
+ } else if (*end == ',' || *end == ')') {
+ max = idx;
+ if (min == IAVF_MAX_QUEUE_NUM)
+ min = idx;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ devargs->proto_xtr[idx] = xtr_type;
+
+ min = IAVF_MAX_QUEUE_NUM;
+ } else {
+ return -1;
+ }
+
+ str = end + 1;
+ } while (*end != ')' && *end != '\0');
+
+ return 0;
+}
+
+static int
+iavf_parse_queue_proto_xtr(const char *queues, struct iavf_devargs *devargs)
+{
+ const char *queue_start;
+ uint32_t idx;
+ int xtr_type;
+ char flex_name[32];
+
+ while (isblank(*queues))
+ queues++;
+
+ if (*queues != '[') {
+ xtr_type = iavf_lookup_proto_xtr_type(queues);
+ if (xtr_type < 0)
+ return -1;
+
+ devargs->proto_xtr_dflt = xtr_type;
+
+ return 0;
+ }
+
+ queues++;
+ do {
+ while (isblank(*queues))
+ queues++;
+ if (*queues == '\0')
+ return -1;
+
+ queue_start = queues;
+
+ /* go across a complete bracket */
+ if (*queue_start == '(') {
+ queues += strcspn(queues, ")");
+ if (*queues != ')')
+ return -1;
+ }
+
+ /* scan the separator ':' */
+ queues += strcspn(queues, ":");
+ if (*queues++ != ':')
+ return -1;
+ while (isblank(*queues))
+ queues++;
+
+ for (idx = 0; ; idx++) {
+ if (isblank(queues[idx]) ||
+ queues[idx] == ',' ||
+ queues[idx] == ']' ||
+ queues[idx] == '\0')
+ break;
+
+ if (idx > sizeof(flex_name) - 2)
+ return -1;
+
+ flex_name[idx] = queues[idx];
+ }
+ flex_name[idx] = '\0';
+ xtr_type = iavf_lookup_proto_xtr_type(flex_name);
+ if (xtr_type < 0)
+ return -1;
+
+ queues += idx;
+
+ while (isblank(*queues) || *queues == ',' || *queues == ']')
+ queues++;
+
+ if (iavf_parse_queue_set(queue_start, xtr_type, devargs) < 0)
+ return -1;
+ } while (*queues != '\0');
+
+ return 0;
+}
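+
+/*
+ * Illustrative devargs values accepted by the parsers above (the queue IDs
+ * are arbitrary examples):
+ *   "vlan"                  - all queues default to the vlan profile
+ *   "[(0,2-3):tcp,5:ipv6]"  - queues 0,2,3 use tcp; queue 5 uses ipv6
+ */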
+
+static int
+iavf_handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
+ void *extra_args)
+{
+ struct iavf_devargs *devargs = extra_args;
+
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ if (iavf_parse_queue_proto_xtr(value, devargs) < 0) {
+ PMD_DRV_LOG(ERR, "invalid proto_xtr parameter: '%s'",
+ value);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iavf_parse_devargs(struct rte_eth_dev *dev)
+{
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_devargs *devargs = dev->device->devargs;
+ struct rte_kvargs *kvlist;
+ int ret;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args);
+ if (!kvlist) {
+ PMD_INIT_LOG(ERR, "invalid kvargs key");
+ return -EINVAL;
+ }
+
+ ad->devargs.proto_xtr_dflt = IAVF_PROTO_XTR_NONE;
+ memset(ad->devargs.proto_xtr, IAVF_PROTO_XTR_NONE,
+ sizeof(ad->devargs.proto_xtr));
+
+ ret = rte_kvargs_process(kvlist, IAVF_PROTO_XTR_ARG,
+ &iavf_handle_proto_xtr_arg, &ad->devargs);
+ if (ret)
+ goto bail;
+
+bail:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static void
+iavf_init_proto_xtr(struct rte_eth_dev *dev)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct iavf_adapter *ad =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ const struct iavf_proto_xtr_ol *xtr_ol;
+ bool proto_xtr_enable = false;
+ int offset;
+ uint16_t i;
+
+ vf->proto_xtr = rte_zmalloc("vf proto xtr",
+ vf->vsi_res->num_queue_pairs, 0);
+ if (unlikely(!(vf->proto_xtr))) {
+ PMD_DRV_LOG(ERR, "no memory for setting up proto_xtr's table");
+ return;
+ }
+
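+ /* the per-queue devargs setting takes precedence over the default type */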
+ for (i = 0; i < vf->vsi_res->num_queue_pairs; i++) {
+ vf->proto_xtr[i] = ad->devargs.proto_xtr[i] !=
+ IAVF_PROTO_XTR_NONE ?
+ ad->devargs.proto_xtr[i] :
+ ad->devargs.proto_xtr_dflt;
+
+ if (vf->proto_xtr[i] != IAVF_PROTO_XTR_NONE) {
+ uint8_t type = vf->proto_xtr[i];
+
+ iavf_proto_xtr_params[type].required = true;
+ proto_xtr_enable = true;
+ }
+ }
+
+ if (likely(!proto_xtr_enable))
+ return;
+
+ offset = rte_mbuf_dynfield_register(&iavf_proto_xtr_metadata_param);
+ if (unlikely(offset == -1)) {
+ PMD_DRV_LOG(ERR,
+ "failed to register proto_xtr metadata dynfield, error %d",
+ -rte_errno);
+ return;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "proto_xtr metadata offset in mbuf is: %d",
+ offset);
+ rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = offset;
+
+ for (i = 0; i < RTE_DIM(iavf_proto_xtr_params); i++) {
+ xtr_ol = &iavf_proto_xtr_params[i];
+
+ uint8_t rxdid = iavf_proto_xtr_type_to_rxdid((uint8_t)i);
+
+ if (!xtr_ol->required)
+ continue;
+
+ if (!(vf->supported_rxdid & BIT(rxdid))) {
+ PMD_DRV_LOG(ERR,
+ "rxdid[%u] is not supported in hardware",
+ rxdid);
+ rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
+ break;
+ }
+
+ offset = rte_mbuf_dynflag_register(&xtr_ol->param);
+ if (unlikely(offset == -1)) {
+ PMD_DRV_LOG(ERR,
+ "failed to register proto_xtr offload '%s', error %d",
+ xtr_ol->param.name, -rte_errno);
+
+ rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
+ break;
+ }
+
+ PMD_DRV_LOG(DEBUG,
+ "proto_xtr offload '%s' offset in mbuf is: %d",
+ xtr_ol->param.name, offset);
+ *xtr_ol->ol_flag = 1ULL << offset;
+ }
+}
+
static int
iavf_init_vf(struct rte_eth_dev *dev)
{
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ err = iavf_parse_devargs(dev);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to parse devargs");
+ goto err;
+ }
+
err = iavf_set_mac_type(hw);
if (err) {
PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
}
}
+ iavf_init_proto_xtr(dev);
+
return 0;
err_rss:
rte_free(vf->rss_key);
#include "iavf.h"
#include "iavf_rxtx.h"
+#include "rte_pmd_iavf.h"
+
+/* Offset of mbuf dynamic field for protocol extraction's metadata */
+int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;
+
+/* Mask of mbuf dynamic flags for protocol extraction's type */
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+uint8_t
+iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
+{
+ static uint8_t rxdid_map[] = {
+ [IAVF_PROTO_XTR_NONE] = IAVF_RXDID_COMMS_OVS_1,
+ [IAVF_PROTO_XTR_VLAN] = IAVF_RXDID_COMMS_AUX_VLAN,
+ [IAVF_PROTO_XTR_IPV4] = IAVF_RXDID_COMMS_AUX_IPV4,
+ [IAVF_PROTO_XTR_IPV6] = IAVF_RXDID_COMMS_AUX_IPV6,
+ [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
+ [IAVF_PROTO_XTR_TCP] = IAVF_RXDID_COMMS_AUX_TCP,
+ [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+ };
+
+ return flex_type < RTE_DIM(rxdid_map) ?
+ rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
+}
static inline int
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
.release_mbufs = release_txq_mbufs,
};
+static inline void
+iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp)
+{
+ volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
+ (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ uint16_t stat_err;
+#endif
+
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+#endif
+}
+
+static inline void
+iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp)
+{
+ volatile struct iavf_32b_rx_flex_desc_comms *desc =
+ (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error1);
+
+ if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+ if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+ metadata |=
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
+static inline void
+iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp)
+{
+ volatile struct iavf_32b_rx_flex_desc_comms *desc =
+ (volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
+ if (desc->flex_ts.flex.aux0 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+ else if (desc->flex_ts.flex.aux1 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
+static void
+iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
+{
+ switch (rxdid) {
+ case IAVF_RXDID_COMMS_AUX_VLAN:
+ rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+ case IAVF_RXDID_COMMS_AUX_IPV4:
+ rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+ case IAVF_RXDID_COMMS_AUX_IPV6:
+ rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+ case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
+ rxq->xtr_ol_flag =
+ rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+ case IAVF_RXDID_COMMS_AUX_TCP:
+ rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+ case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
+ rxq->xtr_ol_flag =
+ rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+ rxq->rxd_to_pkt_fields =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+ break;
+ case IAVF_RXDID_COMMS_OVS_1:
+ rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+ break;
+ default:
+ /* update this according to the RXDID for FLEX_DESC_NONE */
+ rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+ break;
+ }
+
+ if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
+ rxq->xtr_ol_flag = 0;
+}
+
int
iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
struct iavf_rx_queue *rxq;
const struct rte_memzone *mz;
uint32_t ring_size;
+ uint8_t proto_xtr;
uint16_t len;
uint16_t rx_free_thresh;
return -ENOMEM;
}
- if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
- vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
- rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
+ IAVF_PROTO_XTR_NONE;
+ rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
+ rxq->proto_xtr = proto_xtr;
} else {
rxq->rxdid = IAVF_RXDID_LEGACY_1;
+ rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
}
+ iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);
+
rxq->mp = mp;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_free_thresh;
}
}
+#define IAVF_RX_FLEX_ERR0_BITS \
+ ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
+ (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
+
static inline void
iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
{
} else {
mb->vlan_tci = 0;
}
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+ (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+ PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ mb->vlan_tci_outer = mb->vlan_tci;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+ PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+ rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+ rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+ } else {
+ mb->vlan_tci_outer = 0;
+ }
+#endif
}
/* Translate the rx descriptor status and error fields to pkt flags */
return flags;
}
-
-/* Translate the rx flex descriptor status to pkt flags */
-static inline void
-iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
- volatile union iavf_rx_flex_desc *rxdp)
-{
- volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
- (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
- uint16_t stat_err;
-
- stat_err = rte_le_to_cpu_16(desc->status_error0);
- if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
- mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
- }
-#endif
-
- if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
- }
-}
-
#define IAVF_RX_FLEX_ERR0_BITS \
((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) | \
(1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \
rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
- iavf_rxd_to_pkt_fields(rxm, &rxd);
+ rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
- iavf_rxd_to_pkt_fields(first_seg, &rxd);
+ rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
first_seg->ol_flags |= pkt_flags;
mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
- iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
+ rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
if (rxq->rx_nb_avail)
return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
- if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
+ if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
else
nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
struct iavf_adapter *adapter =
IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
#ifdef RTE_ARCH_X86
struct iavf_rx_queue *rxq;
int i;
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
+/**
+ * Rx Flex Descriptors
+ * These descriptors are used instead of the legacy version descriptors
+ */
+union iavf_16b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile ID */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+ } wb; /* writeback */
+};
+
+union iavf_32b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile ID */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 time_stamp_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flex_meta2;
+ __le16 flex_meta3;
+ union {
+ struct {
+ __le16 flex_meta4;
+ __le16 flex_meta5;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+ } wb; /* writeback */
+};
+
/* HW desc structure, both 16-byte and 32-byte types are supported */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif
+typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union iavf_rx_flex_desc *rxdp);
+
struct iavf_rxq_ops {
void (*release_mbufs)(struct iavf_rx_queue *rxq);
};
bool q_set; /* if rx queue has been configured */
bool rx_deferred_start; /* don't start this queue in dev start */
const struct iavf_rxq_ops *ops;
+ uint8_t proto_xtr; /* protocol extraction type */
+ uint64_t xtr_ol_flag;
+ /* flexible descriptor metadata extraction offload flag */
+ iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
+ /* handle flexible descriptor by RXDID */
};
struct iavf_tx_entry {
};
};
-/* Rx Flex Descriptors
- * These descriptors are used instead of the legacy version descriptors
- */
-union iavf_16b_rx_flex_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_addr is DD bit */
- } read;
- struct {
- /* Qword 0 */
- u8 rxdid; /* descriptor builder profile ID */
- u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
- __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
- __le16 pkt_len; /* [15:14] are reserved */
- __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
- /* sph=[11:11] */
- /* ff1/ext=[15:12] */
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le16 flex_meta0;
- __le16 flex_meta1;
- } wb; /* writeback */
-};
-
-union iavf_32b_rx_flex_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- /* Qword 0 */
- u8 rxdid; /* descriptor builder profile ID */
- u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
- __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
- __le16 pkt_len; /* [15:14] are reserved */
- __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
- /* sph=[11:11] */
- /* ff1/ext=[15:12] */
-
- /* Qword 1 */
- __le16 status_error0;
- __le16 l2tag1;
- __le16 flex_meta0;
- __le16 flex_meta1;
-
- /* Qword 2 */
- __le16 status_error1;
- u8 flex_flags2;
- u8 time_stamp_low;
- __le16 l2tag2_1st;
- __le16 l2tag2_2nd;
-
- /* Qword 3 */
- __le16 flex_meta2;
- __le16 flex_meta3;
- union {
- struct {
- __le16 flex_meta4;
- __le16 flex_meta5;
- } flex;
- __le32 ts_high;
- } flex_ts;
- } wb; /* writeback */
-};
-
/* Rx Flex Descriptor
* RxDID Profile ID 16-21
* Flex-field 0: RSS hash lower 16-bits
IAVF_RXDID_COMMS_AUX_TCP = 21,
IAVF_RXDID_COMMS_OVS_1 = 22,
IAVF_RXDID_COMMS_OVS_2 = 23,
+ IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25,
IAVF_RXDID_LAST = 63,
};
IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
+enum iavf_rx_flex_desc_status_error_1_bits {
+ /* Note: These are predefined bit offsets */
+ IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
+ IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+ /* [10:6] reserved */
+ IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+ IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+ IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+ IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+ IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+ IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
+
const uint32_t *iavf_get_default_ptype_table(void);
static inline
if (rxq->nb_rx_desc % rxq->rx_free_thresh)
return -1;
+ if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
+ return -1;
+
return 0;
}
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
- vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
- vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+ vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+ vc_qp->rxq.rxdid = rxq[i]->rxdid;
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
} else {
+ PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+ "request default RXDID[%d] in Queue[%d]",
+ rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
}
#else
if (vf->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
vf->supported_rxdid & BIT(IAVF_RXDID_LEGACY_0)) {
vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_0;
- PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
- "Queue[%d]", vc_qp->rxq.rxdid, i);
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
} else {
- PMD_DRV_LOG(ERR, "RXDID == 0 is not supported");
+ PMD_DRV_LOG(ERR, "RXDID[%d] is not supported",
+ IAVF_RXDID_LEGACY_0);
return -1;
}
#endif
objs += iavf_avx512_lib.extract_objects('iavf_rxtx_vec_avx512.c')
endif
endif
+
+headers = files('rte_pmd_iavf.h')
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_PMD_IAVF_H_
+#define _RTE_PMD_IAVF_H_
+
+/**
+ * @file rte_pmd_iavf.h
+ *
+ * iavf PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <stdio.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The metadata formats supported by the network flexible descriptor's
+ * protocol extraction.
+ */
+union rte_pmd_ifd_proto_xtr_metadata {
+ uint32_t metadata;
+
+ struct {
+ uint16_t data0;
+ uint16_t data1;
+ } raw;
+
+ struct {
+ uint16_t stag_vid:12,
+ stag_dei:1,
+ stag_pcp:3;
+ uint16_t ctag_vid:12,
+ ctag_dei:1,
+ ctag_pcp:3;
+ } vlan;
+
+ struct {
+ uint16_t protocol:8,
+ ttl:8;
+ uint16_t tos:8,
+ ihl:4,
+ version:4;
+ } ipv4;
+
+ struct {
+ uint16_t hoplimit:8,
+ nexthdr:8;
+ uint16_t flowhi4:4,
+ tc:8,
+ version:4;
+ } ipv6;
+
+ struct {
+ uint16_t flowlo16;
+ uint16_t flowhi4:4,
+ tc:8,
+ version:4;
+ } ipv6_flow;
+
+ struct {
+ uint16_t fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ ece:1,
+ cwr:1,
+ res1:4,
+ doff:4;
+ uint16_t rsvd;
+ } tcp;
+
+ uint32_t ip_ofs;
+};
+
+/* Offset of mbuf dynamic field for flexible descriptor's extraction data */
+extern int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs;
+
+/* Mask of mbuf dynamic flags for flexible descriptor's extraction type */
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+/**
+ * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
+ */
+#define RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m) \
+ RTE_MBUF_DYNFIELD((m), \
+ rte_pmd_ifd_dynfield_proto_xtr_metadata_offs, \
+ uint32_t *)
+
+/**
+ * The mbuf dynamic flag for VLAN protocol extraction metadata. It is valid
+ * when the devargs 'proto_xtr' has 'vlan' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_VLAN \
+ (rte_pmd_ifd_dynflag_proto_xtr_vlan_mask)
+
+/**
+ * The mbuf dynamic flag for IPv4 protocol extraction metadata. It is valid
+ * when the devargs 'proto_xtr' has 'ipv4' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV4 \
+ (rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask)
+
+/**
+ * The mbuf dynamic flag for IPv6 protocol extraction metadata. It is valid
+ * when the devargs 'proto_xtr' has 'ipv6' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6 \
+ (rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask)
+
+/**
+ * The mbuf dynamic flag for IPv6 with flow protocol extraction metadata. It
+ * is valid when the devargs 'proto_xtr' has 'ipv6_flow' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW \
+ (rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask)
+
+/**
+ * The mbuf dynamic flag for TCP protocol extraction metadata. It is valid
+ * when the devargs 'proto_xtr' has 'tcp' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_TCP \
+ (rte_pmd_ifd_dynflag_proto_xtr_tcp_mask)
+
+/**
+ * The mbuf dynamic flag for IP_OFFSET extraction metadata. It is valid
+ * when the devargs 'proto_xtr' has 'ip_offset' specified.
+ */
+#define RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IP_OFFSET \
+ (rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask)
+
+/**
+ * Check if mbuf dynamic field for flexible descriptor's extraction metadata
+ * is registered.
+ *
+ * @return
+ * True if registered, false otherwise.
+ */
+__rte_experimental
+static __rte_always_inline int
+rte_pmd_ifd_dynf_proto_xtr_metadata_avail(void)
+{
+ return rte_pmd_ifd_dynfield_proto_xtr_metadata_offs != -1;
+}
+
+/**
+ * Get the mbuf dynamic field for flexible descriptor's extraction metadata.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @return
+ * The saved protocol extraction metadata.
+ */
+__rte_experimental
+static __rte_always_inline uint32_t
+rte_pmd_ifd_dynf_proto_xtr_metadata_get(struct rte_mbuf *m)
+{
+ return *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m);
+}
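+
+/*
+ * Usage sketch (illustrative only, not part of the API above): assuming the
+ * port was launched with devargs proto_xtr=tcp, an application can read the
+ * extracted TCP metadata from a received mbuf 'm' as follows.
+ *
+ *   if (rte_pmd_ifd_dynf_proto_xtr_metadata_avail() &&
+ *       (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_TCP)) {
+ *           union rte_pmd_ifd_proto_xtr_metadata md;
+ *
+ *           md.metadata = rte_pmd_ifd_dynf_proto_xtr_metadata_get(m);
+ *           printf("tcp doff=%u syn=%u ack=%u\n",
+ *                  md.tcp.doff, md.tcp.syn, md.tcp.ack);
+ *   }
+ */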
+
+/**
+ * Dump the mbuf dynamic field for flexible descriptor's extraction metadata.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ */
+__rte_experimental
+static inline void
+rte_pmd_ifd_dump_proto_xtr_metadata(struct rte_mbuf *m)
+{
+ union rte_pmd_ifd_proto_xtr_metadata data;
+
+ if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
+ return;
+
+ data.metadata = rte_pmd_ifd_dynf_proto_xtr_metadata_get(m);
+
+ if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_VLAN)
+ printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x],"
+ "vlan,stag=%u:%u:%u,ctag=%u:%u:%u",
+ data.raw.data0, data.raw.data1,
+ data.vlan.stag_pcp,
+ data.vlan.stag_dei,
+ data.vlan.stag_vid,
+ data.vlan.ctag_pcp,
+ data.vlan.ctag_dei,
+ data.vlan.ctag_vid);
+ else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV4)
+ printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x],"
+ "ipv4,ver=%u,hdrlen=%u,tos=%u,ttl=%u,proto=%u",
+ data.raw.data0, data.raw.data1,
+ data.ipv4.version,
+ data.ipv4.ihl,
+ data.ipv4.tos,
+ data.ipv4.ttl,
+ data.ipv4.protocol);
+ else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6)
+ printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x],"
+ "ipv6,ver=%u,tc=%u,flow_hi4=0x%x,nexthdr=%u,hoplimit=%u",
+ data.raw.data0, data.raw.data1,
+ data.ipv6.version,
+ data.ipv6.tc,
+ data.ipv6.flowhi4,
+ data.ipv6.nexthdr,
+ data.ipv6.hoplimit);
+ else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IPV6_FLOW)
+ printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x],"
+ "ipv6_flow,ver=%u,tc=%u,flow=0x%x%04x",
+ data.raw.data0, data.raw.data1,
+ data.ipv6_flow.version,
+ data.ipv6_flow.tc,
+ data.ipv6_flow.flowhi4,
+ data.ipv6_flow.flowlo16);
+ else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_TCP)
+ printf(" - Flexible descriptor's Metadata: [0x%04x:0x%04x],"
+ "tcp,doff=%u,flags=%s%s%s%s%s%s%s%s",
+ data.raw.data0, data.raw.data1,
+ data.tcp.doff,
+ data.tcp.cwr ? "C" : "",
+ data.tcp.ece ? "E" : "",
+ data.tcp.urg ? "U" : "",
+ data.tcp.ack ? "A" : "",
+ data.tcp.psh ? "P" : "",
+ data.tcp.rst ? "R" : "",
+ data.tcp.syn ? "S" : "",
+ data.tcp.fin ? "F" : "");
+ else if (m->ol_flags & RTE_IAVF_PKT_RX_DYNF_PROTO_XTR_IP_OFFSET)
+ printf(" - Flexible descriptor's Extraction: ip_offset=%u",
+ data.ip_ofs);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PMD_IAVF_H_ */
DPDK_21 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ # added in 20.11
+ rte_pmd_ifd_dynfield_proto_xtr_metadata_offs;
+ rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
+ rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
+ rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
+ rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
+ rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
+ rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+};