}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
- .release_mbufs = release_rxq_mbufs,
+static const struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
};
-static const struct iavf_txq_ops def_txq_ops = {
- .release_mbufs = release_txq_mbufs,
+static const struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+ [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+ [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+ [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
};
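
These two hunks replace the per-queue ops pointers (def_rxq_ops/def_txq_ops here, plus the SSE variants removed near the end of the patch) with const tables indexed by a small enum stored in the queue itself. The usual reason for this pattern in a DPDK PMD, and the likely motivation here, is multi-process safety: a function pointer written into queue state by the primary process points into the primary's address space and is garbage when a secondary process dereferences it, whereas an integer index can be resolved through each process's own copy of the const table. A minimal standalone sketch of the idea; all demo_* names are invented for illustration:

    /* Standalone illustration of the enum-indexed dispatch pattern.
     * All demo_* names are invented; this is not driver code. */
    #include <stdio.h>

    enum demo_rel_mbufs_type {
        DEMO_REL_MBUFS_DEFAULT = 0,
        DEMO_REL_MBUFS_SSE_VEC = 1,
    };

    struct demo_rxq {
        int queue_id;
        unsigned char rel_mbufs_type;   /* plain index: valid in any process */
    };

    static void demo_release_default(struct demo_rxq *q)
    {
        printf("queue %d: scalar mbuf release\n", q->queue_id);
    }

    static void demo_release_sse(struct demo_rxq *q)
    {
        printf("queue %d: SSE vector mbuf release\n", q->queue_id);
    }

    /* Each process has its own copy of this table in .rodata;
     * only the index travels with the queue state. */
    static void (*const demo_release_ops[])(struct demo_rxq *) = {
        [DEMO_REL_MBUFS_DEFAULT] = demo_release_default,
        [DEMO_REL_MBUFS_SSE_VEC] = demo_release_sse,
    };

    int main(void)
    {
        struct demo_rxq q = { .queue_id = 3,
                              .rel_mbufs_type = DEMO_REL_MBUFS_SSE_VEC };

        demo_release_ops[q.rel_mbufs_type](&q);
        return 0;
    }
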
static inline void
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
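
As in this pair of hunks, every call site now open-codes the table lookup. Since the tables are static to iavf_rxtx.c, a small file-local helper could centralize the dispatch if the repetition is a concern; the wrappers below are hypothetical and not part of the patch:

    /* Hypothetical wrappers, not in the patch; they would have to live in
     * iavf_rxtx.c, where the static dispatch tables are visible. */
    static inline void
    iavf_rxq_release_mbufs(struct iavf_rx_queue *rxq)
    {
        iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
    }

    static inline void
    iavf_txq_release_mbufs(struct iavf_tx_queue *txq)
    {
        iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
    }
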
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
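
Note the coupling between these enum values and the #ifdef-guarded slots in the tables above: on a non-x86 build the rx table has a single entry, so IAVF_REL_MBUFS_SSE_VEC would index out of bounds. The patch relies on the same build guards compiling out the vector setup paths that ever assign the vector values. A defensive variant, not in the patch, could fall back to the scalar release:

    /* Hypothetical hardened dispatch (not in the patch): fall back to the
     * scalar release when the recorded type has no slot in this build.
     * RTE_DIM() is DPDK's array-size macro from rte_common.h; unfilled
     * slots in the const table are zero-initialized, hence the NULL test. */
    static inline void
    iavf_rxq_release_mbufs_checked(struct iavf_rx_queue *rxq)
    {
        unsigned int t = rxq->rel_mbufs_type;

        if (t >= RTE_DIM(iavf_rxq_release_mbufs_ops) ||
            iavf_rxq_release_mbufs_ops[t].release_mbufs == NULL)
            t = IAVF_REL_MBUFS_DEFAULT;
        iavf_rxq_release_mbufs_ops[t].release_mbufs(rxq);
    }
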
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
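
These prototypes pair with the SSE definitions made non-static at the bottom of this patch. The AVX512 release function is declared here as well, so iavf_rxtx_vec_avx512.c presumably drops its static qualifier in a hunk not shown in this excerpt, along these lines:

    -static void __rte_cold
    +void __rte_cold
     iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
     {
         ...
     }
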
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
return nb_tx;
}
-static void __rte_cold
+void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}
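
Taken together, the selection flow is: queue setup records IAVF_REL_MBUFS_DEFAULT, the vector setup paths overwrite it with their own value, and every stop/release path resolves the index through the const table, so the release routine always matches the burst mode last selected. An illustrative trace; only iavf_rxq_vec_setup()/iavf_txq_vec_setup() appear by name in the hunks above, the other call sites are shown without their enclosing functions:

    /* Illustrative lifecycle, enclosing function names elided as in the hunks. */
    rx queue setup:       rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
    iavf_rxq_vec_setup(): rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
    queue stop/release:   iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
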