#include <rte_ethdev_driver.h>
#include <rte_net.h>
+#include "rte_pmd_ice.h"
#include "ice_rxtx.h"
#define ICE_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
-static inline uint8_t
-ice_rxdid_to_proto_xtr_type(uint8_t rxdid)
-{
- static uint8_t xtr_map[] = {
- [ICE_RXDID_COMMS_AUX_VLAN] = PROTO_XTR_VLAN,
- [ICE_RXDID_COMMS_AUX_IPV4] = PROTO_XTR_IPV4,
- [ICE_RXDID_COMMS_AUX_IPV6] = PROTO_XTR_IPV6,
- [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = PROTO_XTR_IPV6_FLOW,
- [ICE_RXDID_COMMS_AUX_TCP] = PROTO_XTR_TCP,
+/* Offset of mbuf dynamic field for protocol extraction data */
+int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
+
+/* Mask of mbuf dynamic flags for protocol extraction type */
+uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
+uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
+
+static inline uint64_t
+ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
+{
+ static uint64_t *ol_flag_map[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] =
+ &rte_net_ice_dynflag_proto_xtr_vlan_mask,
+ [ICE_RXDID_COMMS_AUX_IPV4] =
+ &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
+ [ICE_RXDID_COMMS_AUX_IPV6] =
+ &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
+ &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
+ [ICE_RXDID_COMMS_AUX_TCP] =
+ &rte_net_ice_dynflag_proto_xtr_tcp_mask,
};
+ uint64_t *ol_flag;
- return rxdid < RTE_DIM(xtr_map) ? xtr_map[rxdid] : PROTO_XTR_NONE;
+ ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
+
+ return ol_flag != NULL ? *ol_flag : 0ULL;
}
static inline uint8_t
mb->vlan_tci, mb->vlan_tci_outer);
}
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
#define ICE_RX_PROTO_XTR_VALID \
((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
(1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+static void
+ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
+ volatile struct ice_32b_rx_flex_desc_comms *desc)
+{
+ uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
+ uint32_t metadata;
+ uint64_t ol_flag;
+
+ if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
+ return;
+
+ ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
+ if (unlikely(!ol_flag))
+ return;
+
+ mb->ol_flags |= ol_flag;
+
+ metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
+
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
+ metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+
+ *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+}
+#endif
+
static inline void
ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
volatile union ice_rx_flex_desc *rxdp)
}
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- init_proto_xtr_flds(mb);
-
- stat_err = rte_le_to_cpu_16(desc->status_error1);
- if (stat_err & ICE_RX_PROTO_XTR_VALID) {
- struct proto_xtr_flds *xtr = get_proto_xtr_flds(mb);
-
- if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
- xtr->u.raw.data0 =
- rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
-
- if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
- xtr->u.raw.data1 =
- rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
-
- xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid);
- xtr->magic = PROTO_XTR_MAGIC_ID;
- }
-
if (desc->flow_id != 0xFFFFFFFF) {
mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
}
+
+ if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
+ ice_rxd_to_proto_xtr(mb, desc);
#endif
}
}
}
/* Rx burst function -> human-readable description, consumed by
 * ice_rx_burst_mode_get() to report the active Rx path via
 * rte_eth_rx_burst_mode_get(). Vector entries are x86-only.
 */
static const struct {
	eth_rx_burst_t pkt_burst;
	const char *info;
} ice_rx_burst_infos[] = {
	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
	{ ice_recv_pkts,                    "Scalar" },
#ifdef RTE_ARCH_X86
	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
	{ ice_recv_pkts_vec,                "Vector SSE" },
#endif
};
+
/**
 * Report the currently selected Rx burst mode as a descriptive string.
 *
 * @param dev
 *   Ethernet device whose rx_pkt_burst is inspected.
 * @param queue_id
 *   Unused; the burst function is device-wide, not per-queue.
 * @param mode
 *   Output burst-mode structure; its info string is filled on success.
 * @return
 *   0 when the active burst function is recognized, -EINVAL otherwise.
 */
int
ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		      struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	unsigned int idx;

	for (idx = 0; idx < RTE_DIM(ice_rx_burst_infos); idx++) {
		if (pkt_burst != ice_rx_burst_infos[idx].pkt_burst)
			continue;
		snprintf(mode->info, sizeof(mode->info), "%s",
			 ice_rx_burst_infos[idx].info);
		return 0;
	}

	/* Active burst function is not in the table. */
	return -EINVAL;
}
void __attribute__((cold))
}
}
/* Tx burst function -> human-readable description, consumed by
 * ice_tx_burst_mode_get() to report the active Tx path via
 * rte_eth_tx_burst_mode_get(). Vector entries are x86-only.
 */
static const struct {
	eth_tx_burst_t pkt_burst;
	const char *info;
} ice_tx_burst_infos[] = {
	{ ice_xmit_pkts_simple,   "Scalar Simple" },
	{ ice_xmit_pkts,          "Scalar" },
#ifdef RTE_ARCH_X86
	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
	{ ice_xmit_pkts_vec,      "Vector SSE" },
#endif
};
+
/**
 * Report the currently selected Tx burst mode as a descriptive string.
 *
 * @param dev
 *   Ethernet device whose tx_pkt_burst is inspected.
 * @param queue_id
 *   Unused; the burst function is device-wide, not per-queue.
 * @param mode
 *   Output burst-mode structure; its info string is filled on success.
 * @return
 *   0 when the active burst function is recognized, -EINVAL otherwise.
 */
int
ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
		      struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	unsigned int idx;

	for (idx = 0; idx < RTE_DIM(ice_tx_burst_infos); idx++) {
		if (pkt_burst != ice_tx_burst_infos[idx].pkt_burst)
			continue;
		snprintf(mode->info, sizeof(mode->info), "%s",
			 ice_tx_burst_infos[idx].info);
		return 0;
	}

	/* Active burst function is not in the table. */
	return -EINVAL;
}
/* For each value it means, datasheet of hardware can tell more details