NOTE: In Safe mode, only a very limited set of features is available;
features like RSS, checksum, fdir, tunneling ... are all disabled.
+- ``Protocol extraction per Rx queue``
+
+ Configure the RX queues to do protocol extraction into ``rte_mbuf::udata64``
+ to accelerate protocol handling, e.g. to check for TCP SYN packets quickly.
+
+ The argument format is::
+
+ -w 18:00.0,proto_xtr=<queues:protocol>[<queues:protocol>...]
+ -w 18:00.0,proto_xtr=<protocol>
+
+ Queues are grouped by ``(`` and ``)``. Within a group, ``-`` is used as a
+ range separator and ``,`` is used as a single number separator. The grouping
+ ``()`` can be omitted for a single-element group. If no queues are specified,
+ the PMD will use this protocol extraction type for all queues.
+
+ The protocol is one of: ``vlan, ipv4, ipv6, ipv6_flow, tcp``.
+
+ .. code-block:: console
+
+ testpmd -w 18:00.0,proto_xtr='[(1,2-3,8-9):tcp,10-13:vlan]'
+
+ This setting means that queues 1, 2-3 and 8-9 use TCP extraction, queues
+ 10-13 use VLAN extraction, and the other queues run with no protocol extraction.
+
+ .. code-block:: console
+
+ testpmd -w 18:00.0,proto_xtr=vlan,proto_xtr='[(1,2-3,8-9):tcp,10-23:ipv6]'
+
+ This setting means that queues 1, 2-3 and 8-9 use TCP extraction, queues
+ 10-23 use IPv6 extraction, and the other queues use the default VLAN extraction.
+
+ The extraction will be copied into the lower 32 bits of ``rte_mbuf::udata64``.
+
+ .. table:: Protocol extraction : ``vlan``
+
+   +----------------------------+----------------------------+
+   |           VLAN2            |           VLAN1            |
+   +======+===+=================+======+===+=================+
+   |  PCP | D |       VID       |  PCP | D |       VID       |
+   +------+---+-----------------+------+---+-----------------+
+
+ VLAN1 - single or EVLAN (first for QinQ).
+
+ VLAN2 - C-VLAN (second for QinQ).
+
+ .. table:: Protocol extraction : ``ipv4``
+
+   +----------------------------+----------------------------+
+   |           IPHDR2           |           IPHDR1           |
+   +======+=======+=============+==============+=============+
+   |  Ver |Hdr Len|    ToS      |      TTL     |  Protocol   |
+   +------+-------+-------------+--------------+-------------+
+
+ IPHDR1 - IPv4 header word 4, "TTL" and "Protocol" fields.
+
+ IPHDR2 - IPv4 header word 0, "Ver", "Hdr Len" and "Type of Service" fields.
+
+ .. table:: Protocol extraction : ``ipv6``
+
+   +----------------------------+----------------------------+
+   |           IPHDR2           |           IPHDR1           |
+   +=====+=============+========+=============+==============+
+   | Ver |Traffic class|  Flow  | Next Header |   Hop Limit  |
+   +-----+-------------+--------+-------------+--------------+
+
+ IPHDR1 - IPv6 header word 3, "Next Header" and "Hop Limit" fields.
+
+ IPHDR2 - IPv6 header word 0, "Ver", "Traffic class" and high 4 bits of
+ "Flow Label" fields.
+
+ .. table:: Protocol extraction : ``ipv6_flow``
+
+   +----------------------------+----------------------------+
+   |           IPHDR2           |           IPHDR1           |
+   +=====+=============+========+============================+
+   | Ver |Traffic class|            Flow Label               |
+   +-----+-------------+-------------------------------------+
+
+ IPHDR1 - IPv6 header word 1, 16 low bits of the "Flow Label" field.
+
+ IPHDR2 - IPv6 header word 0, "Ver", "Traffic class" and high 4 bits of
+ "Flow Label" fields.
+
+ .. table:: Protocol extraction : ``tcp``
+
+   +----------------------------+----------------------------+
+   |           TCPHDR2          |           TCPHDR1          |
+   +============================+======+======+==============+
+   |          Reserved          |Offset|  RSV |     Flags    |
+   +----------------------------+------+------+--------------+
+
+ TCPHDR1 - TCP header word 6, "Data Offset" and "Flags" fields.
+
+ TCPHDR2 - Reserved
+
+ Use ``get_proto_xtr_flds(struct rte_mbuf *mb)`` to access the protocol
+ extraction result; do not access ``rte_mbuf::udata64`` directly.
+
+ The ``dump_proto_xtr_flds(struct rte_mbuf *mb)`` routine shows how to
+ access the protocol extraction result in ``struct rte_mbuf``.
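+
+ For example, a minimal sketch of using the extraction result to detect TCP
+ SYN packets on a queue configured with ``tcp`` extraction (the helper name
+ ``pkt_is_tcp_syn`` is illustrative, not part of the API):
+
+ .. code-block:: c
+
+    #include <stdbool.h>
+    #include <rte_mbuf.h>
+    #include <rte_pmd_ice.h>
+
+    /* Return true when the flexible descriptor carried a TCP extraction
+     * and the packet is an initial SYN (SYN set, ACK clear).
+     */
+    static inline bool
+    pkt_is_tcp_syn(struct rte_mbuf *mb)
+    {
+        struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
+
+        if (xtr->magic != PROTO_XTR_MAGIC_ID || xtr->type != PROTO_XTR_TCP)
+            return false;
+
+        return xtr->u.tcp.syn && !xtr->u.tcp.ack;
+    }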
+
Driver compilation and testing
------------------------------
Updated the Intel ice driver with new features and improvements, including:
* Added support for device-specific DDP package loading.
+ * Added support for handling Receive Flex Descriptor.
+ * Added support for protocol extraction per Rx queue.
* **Added Marvell NITROX symmetric crypto PMD.**
endif
SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_ICE_PMD)-include := rte_pmd_ice.h
+
include $(RTE_SDK)/mk/rte.lib.mk
/* devargs */
#define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
+#define ICE_PROTO_XTR_ARG "proto_xtr"
static const char * const ice_valid_args[] = {
ICE_SAFE_MODE_SUPPORT_ARG,
+ ICE_PROTO_XTR_ARG,
NULL
};
hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
+static int
+lookup_proto_xtr_type(const char *xtr_name)
+{
+ static struct {
+ const char *name;
+ enum proto_xtr_type type;
+ } xtr_type_map[] = {
+ { "vlan", PROTO_XTR_VLAN },
+ { "ipv4", PROTO_XTR_IPV4 },
+ { "ipv6", PROTO_XTR_IPV6 },
+ { "ipv6_flow", PROTO_XTR_IPV6_FLOW },
+ { "tcp", PROTO_XTR_TCP },
+ };
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(xtr_type_map); i++) {
+ if (strcmp(xtr_name, xtr_type_map[i].name) == 0)
+ return xtr_type_map[i].type;
+ }
+
+ return -1;
+}
+
+/*
+ * Parse an element; an element is either a single number/range or a
+ * '(' ')' group:
+ * 1) A single number element is just a number, e.g. 9
+ * 2) A single range element is two numbers separated by '-', e.g. 2-6
+ * 3) A group element combines multiple 1) or 2) within '( )', e.g. (0,2-4,6)
+ *    Within a group element, '-' is used as a range separator and
+ *    ',' as a single number separator.
+ */
+static int
+parse_queue_set(const char *input, int xtr_type, struct ice_devargs *devargs)
+{
+ const char *str = input;
+ char *end = NULL;
+ uint32_t min, max;
+ uint32_t idx;
+
+ while (isblank(*str))
+ str++;
+
+ if (!isdigit(*str) && *str != '(')
+ return -1;
+
+ /* process single number or single range of number */
+ if (*str != '(') {
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
+ return -1;
+
+ while (isblank(*end))
+ end++;
+
+ min = idx;
+ max = idx;
+
+ /* process single <number>-<number> */
+ if (*end == '-') {
+ end++;
+ while (isblank(*end))
+ end++;
+ if (!isdigit(*end))
+ return -1;
+
+ errno = 0;
+ idx = strtoul(end, &end, 10);
+ if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
+ return -1;
+
+ max = idx;
+ while (isblank(*end))
+ end++;
+ }
+
+ if (*end != ':')
+ return -1;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ devargs->proto_xtr[idx] = xtr_type;
+
+ return 0;
+ }
+
+ /* process set within bracket */
+ str++;
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = ICE_MAX_QUEUE_NUM;
+ do {
+ /* go ahead to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= ICE_MAX_QUEUE_NUM)
+ return -1;
+
+ /* go ahead to separator '-',',' and ')' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == ICE_MAX_QUEUE_NUM)
+ min = idx;
+ else /* avoid continuous '-' */
+ return -1;
+ } else if (*end == ',' || *end == ')') {
+ max = idx;
+ if (min == ICE_MAX_QUEUE_NUM)
+ min = idx;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++)
+ devargs->proto_xtr[idx] = xtr_type;
+
+ min = ICE_MAX_QUEUE_NUM;
+ } else {
+ return -1;
+ }
+
+ str = end + 1;
+ } while (*end != ')' && *end != '\0');
+
+ return 0;
+}
+
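+/*
+ * Parse the proto_xtr devargs value: either a bare protocol name applied
+ * to all queues, or a '[' ']' list of <queue set>:<protocol name> entries.
+ */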
+static int
+parse_queue_proto_xtr(const char *queues, struct ice_devargs *devargs)
+{
+ const char *queue_start;
+ uint32_t idx;
+ int xtr_type;
+ char xtr_name[32];
+
+ while (isblank(*queues))
+ queues++;
+
+ if (*queues != '[') {
+ xtr_type = lookup_proto_xtr_type(queues);
+ if (xtr_type < 0)
+ return -1;
+
+ memset(devargs->proto_xtr, xtr_type,
+ sizeof(devargs->proto_xtr));
+
+ return 0;
+ }
+
+ queues++;
+ do {
+ while (isblank(*queues))
+ queues++;
+ if (*queues == '\0')
+ return -1;
+
+ queue_start = queues;
+
+ /* go across a complete bracket */
+ if (*queue_start == '(') {
+ queues += strcspn(queues, ")");
+ if (*queues != ')')
+ return -1;
+ }
+
+ /* scan the separator ':' */
+ queues += strcspn(queues, ":");
+ if (*queues++ != ':')
+ return -1;
+ while (isblank(*queues))
+ queues++;
+
+ for (idx = 0; ; idx++) {
+ if (isblank(queues[idx]) ||
+ queues[idx] == ',' ||
+ queues[idx] == ']' ||
+ queues[idx] == '\0')
+ break;
+
+ if (idx > sizeof(xtr_name) - 2)
+ return -1;
+
+ xtr_name[idx] = queues[idx];
+ }
+ xtr_name[idx] = '\0';
+ xtr_type = lookup_proto_xtr_type(xtr_name);
+ if (xtr_type < 0)
+ return -1;
+
+ queues += idx;
+
+ while (isblank(*queues) || *queues == ',' || *queues == ']')
+ queues++;
+
+ if (parse_queue_set(queue_start, xtr_type, devargs) < 0)
+ return -1;
+ } while (*queues != '\0');
+
+ return 0;
+}
+
+static int
+handle_proto_xtr_arg(__rte_unused const char *key, const char *value,
+ void *extra_args)
+{
+ struct ice_devargs *devargs = extra_args;
+
+ if (value == NULL || extra_args == NULL)
+ return -EINVAL;
+
+ if (parse_queue_proto_xtr(value, devargs) < 0) {
+ PMD_DRV_LOG(ERR,
+ "The protocol extraction parameter is wrong : '%s'",
+ value);
+ return -1;
+ }
+
+ return 0;
+}
+
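+/*
+ * Check that the loaded DDP package has programmed flexible descriptor
+ * words 4 and 5 of each auxiliary RXDID with the expected protocol IDs
+ * and the extraction opcode; otherwise protocol extraction is unusable.
+ */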
+static bool
+ice_proto_xtr_support(struct ice_hw *hw)
+{
+#define FLX_REG(val, fld, idx) \
+ (((val) & GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_M) >> \
+ GLFLXP_RXDID_FLX_WRD_##idx##_##fld##_S)
+ static struct {
+ uint32_t rxdid;
+ uint16_t protid_0;
+ uint16_t protid_1;
+ } xtr_sets[] = {
+ { ICE_RXDID_COMMS_AUX_VLAN, ICE_PROT_EVLAN_O, ICE_PROT_VLAN_O },
+ { ICE_RXDID_COMMS_AUX_IPV4, ICE_PROT_IPV4_OF_OR_S,
+ ICE_PROT_IPV4_OF_OR_S },
+ { ICE_RXDID_COMMS_AUX_IPV6, ICE_PROT_IPV6_OF_OR_S,
+ ICE_PROT_IPV6_OF_OR_S },
+ { ICE_RXDID_COMMS_AUX_IPV6_FLOW, ICE_PROT_IPV6_OF_OR_S,
+ ICE_PROT_IPV6_OF_OR_S },
+ { ICE_RXDID_COMMS_AUX_TCP, ICE_PROT_TCP_IL, ICE_PROT_ID_INVAL },
+ };
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(xtr_sets); i++) {
+ uint32_t rxdid = xtr_sets[i].rxdid;
+ uint32_t v;
+
+ if (xtr_sets[i].protid_0 != ICE_PROT_ID_INVAL) {
+ v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_4(rxdid));
+
+ if (FLX_REG(v, PROT_MDID, 4) != xtr_sets[i].protid_0 ||
+ FLX_REG(v, RXDID_OPCODE, 4) != ICE_RX_OPC_EXTRACT)
+ return false;
+ }
+
+ if (xtr_sets[i].protid_1 != ICE_PROT_ID_INVAL) {
+ v = ICE_READ_REG(hw, GLFLXP_RXDID_FLX_WRD_5(rxdid));
+
+ if (FLX_REG(v, PROT_MDID, 5) != xtr_sets[i].protid_1 ||
+ FLX_REG(v, RXDID_OPCODE, 5) != ICE_RX_OPC_EXTRACT)
+ return false;
+ }
+ }
+
+ return true;
+}
+
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
uint32_t num)
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct ice_hw *hw = ICE_PF_TO_HW(pf);
pf->lan_nb_qps = pf->lan_nb_qp_max;
+ if (ice_proto_xtr_support(hw))
+ pf->proto_xtr = rte_zmalloc(NULL, pf->lan_nb_qps, 0);
+
+ if (pf->proto_xtr != NULL)
+ rte_memcpy(pf->proto_xtr, ad->devargs.proto_xtr,
+ RTE_MIN((size_t)pf->lan_nb_qps,
+ sizeof(ad->devargs.proto_xtr)));
+ else
+ PMD_DRV_LOG(NOTICE, "Protocol extraction is disabled");
+
return 0;
}
return -EINVAL;
}
+ memset(ad->devargs.proto_xtr, PROTO_XTR_NONE,
+ sizeof(ad->devargs.proto_xtr));
+
+ ret = rte_kvargs_process(kvlist, ICE_PROTO_XTR_ARG,
+ &handle_proto_xtr_arg, &ad->devargs);
+ if (ret)
+ goto bail;
+
ret = rte_kvargs_process(kvlist, ICE_SAFE_MODE_SUPPORT_ARG,
&parse_bool, &ad->devargs.safe_mode_support);
+bail:
rte_kvargs_free(kvlist);
return ret;
}
ice_sched_cleanup_all(hw);
rte_free(hw->port_info);
ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
return ret;
}
rte_free(hw->port_info);
hw->port_info = NULL;
ice_shutdown_all_ctrlq(hw);
+ rte_free(pf->proto_xtr);
+ pf->proto_xtr = NULL;
}
static int
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
+ ICE_PROTO_XTR_ARG "=[queue:]<vlan|ipv4|ipv6|ipv6_flow|tcp>"
ICE_SAFE_MODE_SUPPORT_ARG "=<0|1>");
RTE_INIT(ice_init_log)
uint16_t lan_nb_qp_max;
uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
uint16_t base_queue; /* The base queue pairs index in the device */
+ uint8_t *proto_xtr; /* Protocol extraction type for all queues */
struct ice_hw_port_stats stats_offset;
struct ice_hw_port_stats stats;
/* internal packet statistics, it should be excluded from the total */
struct ice_flow_list flow_list;
};
+#define ICE_MAX_QUEUE_NUM 2048
+
/**
* Cache devargs parse result.
*/
struct ice_devargs {
int safe_mode_support;
+ uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
};
/**
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
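+/* Map an auxiliary RXDID reported by the Rx flex descriptor to its
+ * protocol extraction type.
+ */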
+static inline uint8_t
+ice_rxdid_to_proto_xtr_type(uint8_t rxdid)
+{
+ static uint8_t xtr_map[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = PROTO_XTR_VLAN,
+ [ICE_RXDID_COMMS_AUX_IPV4] = PROTO_XTR_IPV4,
+ [ICE_RXDID_COMMS_AUX_IPV6] = PROTO_XTR_IPV6,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = PROTO_XTR_IPV6_FLOW,
+ [ICE_RXDID_COMMS_AUX_TCP] = PROTO_XTR_TCP,
+ };
+
+ return rxdid < RTE_DIM(xtr_map) ? xtr_map[rxdid] : PROTO_XTR_NONE;
+}
+
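+/* Map a protocol extraction type to the RXDID programmed into the Rx
+ * queue context; unknown types fall back to the generic descriptor.
+ */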
+static inline uint8_t
+ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
+{
+ static uint8_t rxdid_map[] = {
+ [PROTO_XTR_NONE] = ICE_RXDID_COMMS_GENERIC,
+ [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN,
+ [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4,
+ [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6,
+ [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
+ [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP,
+ };
+
+ return xtr_type < RTE_DIM(rxdid_map) ?
+ rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
+}
static enum ice_status
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
rx_ctx.showiv = 0;
rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
+ rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
+
+ PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
+ rxq->port_id, rxq->queue_id, rxdid);
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->proto_xtr = pf->proto_xtr != NULL ?
+ pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
/* Allocate the maximum number of RX ring hardware descriptors. */
len = ICE_MAX_RING_DESC;
mb->vlan_tci, mb->vlan_tci_outer);
}
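+/* status_error1 bits that indicate flexible descriptor metadata words
+ * 4/5 contain valid protocol extraction data.
+ */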
+#define ICE_RX_PROTO_XTR_VALID \
+ ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
+ (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+
static inline void
ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
volatile union ice_rx_flex_desc *rxdp)
mb->ol_flags |= PKT_RX_RSS_HASH;
mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
}
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ init_proto_xtr_flds(mb);
+
+ stat_err = rte_le_to_cpu_16(desc->status_error1);
+ if (stat_err & ICE_RX_PROTO_XTR_VALID) {
+ struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
+
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+ xtr->u.raw.data0 =
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+ xtr->u.raw.data1 =
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+ xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
+ xtr->magic = PROTO_XTR_MAGIC_ID;
+ }
+#endif
}
#ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC
#ifndef _ICE_RXTX_H_
#define _ICE_RXTX_H_
+#include "rte_pmd_ice.h"
#include "ice_ethdev.h"
#define ICE_ALIGN_RING_DESC 32
uint16_t max_pkt_len; /* Maximum packet length */
bool q_set; /* indicate if rx queue has been configured */
bool rx_deferred_start; /* don't start this queue in dev start */
+ uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
ice_rx_release_mbufs_t rx_rel_mbufs;
};
if (rxq->nb_rx_desc % rxq->rx_free_thresh)
return -1;
+ if (rxq->proto_xtr != PROTO_XTR_NONE)
+ return -1;
+
return 0;
}
objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
endif
endif
+
+install_headers('rte_pmd_ice.h')
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_PMD_ICE_H_
+#define _RTE_PMD_ICE_H_
+
+#include <stdio.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum proto_xtr_type {
+ PROTO_XTR_NONE,
+ PROTO_XTR_VLAN,
+ PROTO_XTR_IPV4,
+ PROTO_XTR_IPV6,
+ PROTO_XTR_IPV6_FLOW,
+ PROTO_XTR_TCP,
+};
+
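+/*
+ * Protocol extraction result, stored in rte_mbuf::udata64: 'u' holds the
+ * two 16-bit words extracted by the flexible Rx descriptor, 'type'
+ * identifies the extraction type and 'magic' marks the result as valid.
+ */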
+struct proto_xtr_flds {
+ union {
+ struct {
+ uint16_t data0;
+ uint16_t data1;
+ } raw;
+ struct {
+ uint16_t stag_vid:12,
+ stag_dei:1,
+ stag_pcp:3;
+ uint16_t ctag_vid:12,
+ ctag_dei:1,
+ ctag_pcp:3;
+ } vlan;
+ struct {
+ uint16_t protocol:8,
+ ttl:8;
+ uint16_t tos:8,
+ ihl:4,
+ version:4;
+ } ipv4;
+ struct {
+ uint16_t hoplimit:8,
+ nexthdr:8;
+ uint16_t flowhi4:4,
+ tc:8,
+ version:4;
+ } ipv6;
+ struct {
+ uint16_t flowlo16;
+ uint16_t flowhi4:4,
+ tc:8,
+ version:4;
+ } ipv6_flow;
+ struct {
+ uint16_t fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ ece:1,
+ cwr:1,
+ res1:4,
+ doff:4;
+ uint16_t rsvd;
+ } tcp;
+ } u;
+
+ uint16_t rsvd;
+
+ uint8_t type;
+
+#define PROTO_XTR_MAGIC_ID 0xCE
+ uint8_t magic;
+};
+
+static inline void
+init_proto_xtr_flds(struct rte_mbuf *mb)
+{
+ mb->udata64 = 0;
+}
+
+static inline struct proto_xtr_flds *
+get_proto_xtr_flds(struct rte_mbuf *mb)
+{
+ RTE_BUILD_BUG_ON(sizeof(struct proto_xtr_flds) > sizeof(mb->udata64));
+
+ return (struct proto_xtr_flds *)&mb->udata64;
+}
+
+static inline void
+dump_proto_xtr_flds(struct rte_mbuf *mb)
+{
+ struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
+
+ if (xtr->magic != PROTO_XTR_MAGIC_ID || xtr->type == PROTO_XTR_NONE)
+ return;
+
+ printf(" - Protocol Extraction:[0x%04x:0x%04x],",
+ xtr->u.raw.data0, xtr->u.raw.data1);
+
+ if (xtr->type == PROTO_XTR_VLAN)
+ printf("vlan,stag=%u:%u:%u,ctag=%u:%u:%u ",
+ xtr->u.vlan.stag_pcp,
+ xtr->u.vlan.stag_dei,
+ xtr->u.vlan.stag_vid,
+ xtr->u.vlan.ctag_pcp,
+ xtr->u.vlan.ctag_dei,
+ xtr->u.vlan.ctag_vid);
+ else if (xtr->type == PROTO_XTR_IPV4)
+ printf("ipv4,ver=%u,hdrlen=%u,tos=%u,ttl=%u,proto=%u ",
+ xtr->u.ipv4.version,
+ xtr->u.ipv4.ihl,
+ xtr->u.ipv4.tos,
+ xtr->u.ipv4.ttl,
+ xtr->u.ipv4.protocol);
+ else if (xtr->type == PROTO_XTR_IPV6)
+ printf("ipv6,ver=%u,tc=%u,flow_hi4=0x%x,nexthdr=%u,hoplimit=%u ",
+ xtr->u.ipv6.version,
+ xtr->u.ipv6.tc,
+ xtr->u.ipv6.flowhi4,
+ xtr->u.ipv6.nexthdr,
+ xtr->u.ipv6.hoplimit);
+ else if (xtr->type == PROTO_XTR_IPV6_FLOW)
+ printf("ipv6_flow,ver=%u,tc=%u,flow=0x%x%04x ",
+ xtr->u.ipv6_flow.version,
+ xtr->u.ipv6_flow.tc,
+ xtr->u.ipv6_flow.flowhi4,
+ xtr->u.ipv6_flow.flowlo16);
+ else if (xtr->type == PROTO_XTR_TCP)
+ printf("tcp,doff=%u,flags=%s%s%s%s%s%s%s%s ",
+ xtr->u.tcp.doff,
+ xtr->u.tcp.cwr ? "C" : "",
+ xtr->u.tcp.ece ? "E" : "",
+ xtr->u.tcp.urg ? "U" : "",
+ xtr->u.tcp.ack ? "A" : "",
+ xtr->u.tcp.psh ? "P" : "",
+ xtr->u.tcp.rst ? "R" : "",
+ xtr->u.tcp.syn ? "S" : "",
+ xtr->u.tcp.fin ? "F" : "");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PMD_ICE_H_ */