#include "ice_rxtx.h"
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
if (!split_flags[buf_idx]) {
/* it's the last packet of the set */
start->hash = end->hash;
+ start->vlan_tci = end->vlan_tci;
start->ol_flags = end->ol_flags;
/* we need to strip crc for the whole packet */
start->pkt_len -= rxq->crc_len;
}
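
Context for the hunk above: on a multi-buffer packet the hardware writes hash, VLAN and flag metadata only with the final descriptor, so the head mbuf must inherit it from the tail segment; without the added vlan_tci copy, reassembled packets lost their VLAN tag. A minimal standalone sketch of the invariant being restored (the helper is hypothetical, not part of the patch):

#include <rte_mbuf.h>
#include <rte_debug.h>

/* Hypothetical check: after reassembly the head mbuf must carry the
 * metadata the NIC delivered with the last segment. */
static void
assert_reassembled_metadata(const struct rte_mbuf *head,
			    const struct rte_mbuf *tail)
{
	RTE_ASSERT(head->hash.rss == tail->hash.rss);
	RTE_ASSERT(head->vlan_tci == tail->vlan_tci);	/* the new copy */
	RTE_ASSERT(head->ol_flags == tail->ol_flags);
}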
static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
uint32_t n;
	/**
	 *  vPMD tx will not set sw_ring's mbuf to NULL after free,
	 *  so need to free remains more carefully.
	 */
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
- if (txq->tx_tail < i) {
- for (; i < txq->nb_tx_desc; i++) {
+
+#ifdef CC_AVX512_SUPPORT
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+
+ if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+ dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
+ struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
+
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(swr[i].mbuf);
+ swr[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
+ rte_pktmbuf_free_seg(swr[i].mbuf);
+ swr[i].mbuf = NULL;
+ }
+ } else
+#endif
+ {
+ if (txq->tx_tail < i) {
+ for (; i < txq->nb_tx_desc; i++) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ i = 0;
+ }
+ for (; i < txq->tx_tail; i++) {
rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
txq->sw_ring[i].mbuf = NULL;
}
- i = 0;
- }
- for (; i < txq->tx_tail; i++) {
- rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
- txq->sw_ring[i].mbuf = NULL;
}
	return 0;
}
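
Why the (void *) cast above is safe: the AVX512 transmit path repacks the software ring in place with an entry type that holds only the mbuf pointer (struct ice_vec_tx_entry), so whoever reads the ring back must use that same, denser stride. A standalone sketch of the layout contract, with illustrative type names rather than the driver's:

#include <stddef.h>

struct mbuf;				/* stand-in for struct rte_mbuf */

struct scalar_entry {			/* full bookkeeping entry */
	struct mbuf *mbuf;
	unsigned short next_id;
	unsigned short last_id;
};

struct vec_entry {			/* vector path: pointer only */
	struct mbuf *mbuf;
};

/* The vector path wrote the ring with vec_entry's stride; cleanup must
 * read it back the same way. Indexing the memory as scalar_entry[]
 * would land on the wrong pointers from entry 1 onward. */
static void
free_vec_ring(struct scalar_entry *ring, size_t n,
	      void (*free_seg)(struct mbuf *))
{
	struct vec_entry *swr = (void *)ring;	/* same cast as the patch */
	size_t i;

	for (i = 0; i < n; i++) {
		if (swr[i].mbuf != NULL) {
			free_seg(swr[i].mbuf);
			swr[i].mbuf = NULL;
		}
	}
}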
+#define ICE_TX_NO_VECTOR_FLAGS ( \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD ( \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD ( \
+ RTE_ETH_RX_OFFLOAD_CHECKSUM | \
+ RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_RX_OFFLOAD_VLAN | \
+ RTE_ETH_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH 0
+#define ICE_VECTOR_OFFLOAD_PATH 1
+
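
ICE_TX_NO_VECTOR_FLAGS disqualifies the vector path outright, while ICE_TX_VECTOR_OFFLOAD merely routes the queue to the offload flavor, so the two masks must stay disjoint. A compile-time guard one could add (hypothetical, not part of the patch):

#include <assert.h>

/* Hypothetical guard: an offload must not be both "unsupported by
 * vPMD" and "handled by the vPMD offload path". */
static_assert((ICE_TX_NO_VECTOR_FLAGS & ICE_TX_VECTOR_OFFLOAD) == 0,
	      "Tx no-vector and vector-offload masks overlap");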
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
if (rxq->proto_xtr != PROTO_XTR_NONE)
return -1;
- return 0;
-}
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ return -1;
-#define ICE_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+ return ICE_VECTOR_OFFLOAD_PATH;
+
+ return ICE_VECTOR_PATH;
+}
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
if (!txq)
return -1;
- if (txq->offloads & ICE_NO_VECTOR_FLAGS)
- return -1;
-
if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
return -1;
- return 0;
+ if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+ return -1;
+
+ if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+ return ICE_VECTOR_OFFLOAD_PATH;
+
+ return ICE_VECTOR_PATH;
}
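
With the thresholds in range, a Tx queue's offload mask therefore maps to one of three outcomes. Illustrative cases (not from the patch):

/* txq->offloads == 0
 *	-> ICE_VECTOR_PATH (basic vector Tx)
 * txq->offloads == RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 *		    RTE_ETH_TX_OFFLOAD_TCP_CKSUM
 *	-> ICE_VECTOR_OFFLOAD_PATH (per-packet offload bits in descriptors)
 * txq->offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO
 *	-> -1 (TSO still requires the scalar Tx path)
 */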
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
struct ice_rx_queue *rxq;
- struct ice_adapter *ad =
- ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
- /* vPMD does not support flow mark. */
- if (ad->devargs.flow_mark_support)
- return -1;
+ int ret = 0;
+ int result = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (ice_rx_vec_queue_default(rxq))
+		ret = ice_rx_vec_queue_default(rxq);
+ if (ret < 0)
return -1;
+ if (ret == ICE_VECTOR_OFFLOAD_PATH)
+ result = ret;
}
- return 0;
+ return result;
}
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
struct ice_tx_queue *txq;
+ int ret = 0;
+ int result = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (ice_tx_vec_queue_default(txq))
+ ret = ice_tx_vec_queue_default(txq);
+ if (ret < 0)
return -1;
+ if (ret == ICE_VECTOR_OFFLOAD_PATH)
+ result = ret;
}
- return 0;
+ return result;
}
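
The device-level checks return -1 if any queue rules out the vector path, ICE_VECTOR_OFFLOAD_PATH if at least one queue needs offloads, and ICE_VECTOR_PATH otherwise. A hedged sketch of a dispatcher consuming that result (the use_* helpers are hypothetical; the real selection happens in the driver's ice_set_rx_function/ice_set_tx_function):

void use_scalar_rx(struct rte_eth_dev *dev);		/* hypothetical */
void use_vector_rx_basic(struct rte_eth_dev *dev);	/* hypothetical */
void use_vector_rx_offload(struct rte_eth_dev *dev);	/* hypothetical */

static void
pick_rx_burst(struct rte_eth_dev *dev)
{
	int path = ice_rx_vec_dev_check_default(dev);

	if (path < 0)
		use_scalar_rx(dev);		/* some queue rules out vPMD */
	else if (path == ICE_VECTOR_OFFLOAD_PATH)
		use_vector_rx_offload(dev);	/* checksum/VLAN/RSS wanted */
	else					/* ICE_VECTOR_PATH */
		use_vector_rx_basic(dev);
}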
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+ uint64_t *txd_hi)
+{
+ uint64_t ol_flags = tx_pkt->ol_flags;
+ uint32_t td_cmd = 0;
+ uint32_t td_offset = 0;
+
+ /* Tx Checksum Offload */
+ /* SET MACLEN */
+ td_offset |= (tx_pkt->l2_len >> 1) <<
+ ICE_TX_DESC_LEN_MACLEN_S;
+
+ /* Enable L3 checksum offload */
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case RTE_MBUF_F_TX_UDP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ default:
+ break;
+ }
+
+ *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+ /* Tx VLAN/QINQ insertion Offload */
+ if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+ td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+ *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+ ICE_TXD_QW1_L2TAG1_S);
+ }
+
+ *txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
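
ice_txd_enable_offload() only ORs command, offset and L2TAG1 bits into the descriptor's second quadword; the caller still supplies DTYPE and the buffer size, as the vector Tx loops do. A hedged sketch of a call site (simplified; EOP/RS command flags omitted, txd points at one ice_tx_desc):

	uint64_t hi_qw = ICE_TX_DESC_DTYPE_DATA |
			 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);

	ice_txd_enable_offload(pkt, &hi_qw);	/* add checksum/VLAN bits */

	txd->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(pkt));
	txd->cmd_type_offset_bsz = rte_cpu_to_le_64(hi_qw);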
#endif