}
static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
uint32_t n;
#ifdef CC_AVX512_SUPPORT
struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
- if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+ if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+ dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
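+ /* The AVX512 Tx paths use the compact vector sw_ring entry (mbuf pointer only). */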
struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
if (txq->tx_tail < i) {
return 0;
}
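+/* Tx offloads the vector Tx path cannot handle at all; such queues fall back to the scalar path. */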
+#define ICE_TX_NO_VECTOR_FLAGS ( \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO)
+
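+/* Tx offloads the offload flavour of the vector path can program in the descriptor. */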
+#define ICE_TX_VECTOR_OFFLOAD ( \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_QINQ_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
+
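+/* Rx offloads the offload flavour of the vector path can handle. */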
+#define ICE_RX_VECTOR_OFFLOAD ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_SCTP_CKSUM | \
+ DEV_RX_OFFLOAD_VLAN | \
+ DEV_RX_OFFLOAD_RSS_HASH)
+
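+/* Return values of the queue/device checks below: basic vector path or vector path with offloads. */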
+#define ICE_VECTOR_PATH 0
+#define ICE_VECTOR_OFFLOAD_PATH 1
+
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
if (rxq->proto_xtr != PROTO_XTR_NONE)
return -1;
- return 0;
-}
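+ /* A queue with any supported Rx offload selects the offload vector path. */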
+ if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+ return ICE_VECTOR_OFFLOAD_PATH;
-#define ICE_NO_VECTOR_FLAGS ( \
- DEV_TX_OFFLOAD_MULTI_SEGS | \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_SCTP_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+ return ICE_VECTOR_PATH;
+}
static inline int
ice_tx_vec_queue_default(struct ice_tx_queue *txq)
{
if (!txq)
return -1;
- if (txq->offloads & ICE_NO_VECTOR_FLAGS)
- return -1;
-
if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
return -1;
- return 0;
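+ /* Unsupported offloads disable the vector path entirely; supported ones select its offload flavour. */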
+ if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+ return -1;
+
+ if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+ return ICE_VECTOR_OFFLOAD_PATH;
+
+ return ICE_VECTOR_PATH;
}
static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
struct ice_rx_queue *rxq;
+ int ret = 0;
+ int result = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (ice_rx_vec_queue_default(rxq))
+ ret = ice_rx_vec_queue_default(rxq);
+ if (ret < 0)
return -1;
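+ /* One queue needing offloads is enough to select the offload path for the whole port. */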
+ if (ret == ICE_VECTOR_OFFLOAD_PATH)
+ result = ret;
}
- return 0;
+ return result;
}
static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
struct ice_tx_queue *txq;
+ int ret = 0;
+ int result = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (ice_tx_vec_queue_default(txq))
+ ret = ice_tx_vec_queue_default(txq);
+ if (ret < 0)
return -1;
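+ /* As on the Rx side, a single offload-path queue selects it for the whole port. */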
+ if (ret == ICE_VECTOR_OFFLOAD_PATH)
+ result = ret;
}
- return 0;
+ return result;
}
#ifdef CC_AVX2_SUPPORT
}
#endif
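+/* Fill the offload fields (MACLEN/IPLEN/L4LEN offsets, checksum commands, L2 tag) of a
+ * Tx descriptor's command/offset qword for one packet; used by the offload Tx vector path.
+ */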
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+ uint64_t *txd_hi)
+{
+ uint64_t ol_flags = tx_pkt->ol_flags;
+ uint32_t td_cmd = 0;
+ uint32_t td_offset = 0;
+
+ /* Tx Checksum Offload */
+ /* SET MACLEN */
+ td_offset |= (tx_pkt->l2_len >> 1) <<
+ ICE_TX_DESC_LEN_MACLEN_S;
+
+ /* Enable L3 checksum offload */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & PKT_TX_IPV4) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & PKT_TX_IPV6) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ default:
+ break;
+ }
+
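+ /* Fold the accumulated header-length offsets into the descriptor qword. */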
+ *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+ /* Tx VLAN/QINQ insertion Offload */
+ if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+ td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+ *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+ ICE_TXD_QW1_L2TAG1_S);
+ }
+
+ *txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
#endif