{
unsigned i;
+#ifdef RTE_IXGBE_INC_VECTOR
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ ixgbe_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+#endif
+
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL) {
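For contrast with the vector dispatch just added: the scalar branch that this hunk truncates scans every ring slot, since any slot may hold an mbuf. A minimal sketch of the full scalar loop, assuming it frees and clears each entry (the hunk cuts off mid-loop; the helper name is mine):

/* Sketch of the scalar release path the hunk above truncates:
 * every ring slot may hold an mbuf, so scan them all. */
static void
scalar_release_sketch(struct ixgbe_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring == NULL)
		return;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i].mbuf != NULL) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
			rxq->sw_ring[i].mbuf = NULL;
		}
	}
}

The vector path cannot reuse this scan: its ring bookkeeping (rx_tail, rxrearm_start, rxrearm_nb) marks which slots are valid, which is what the dedicated routine added later in this patch walks instead.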
void __attribute__((cold))
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
+ uint16_t i, rx_using_sse;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
dev->rx_pkt_burst = ixgbe_recv_pkts;
}
+
+ /* Propagate information about RX function choice through all queues. */
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_using_sse = rx_using_sse;
+ }
}
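dev->rx_pkt_burst is a per-device function pointer, but mbuf release only has a queue pointer in hand, so the choice is cached in each rxq. A hypothetical consumer, purely to illustrate the pattern (the release dispatch in the first hunk is the real one):

/* Hypothetical helper, not part of this patch: report which RX burst
 * flavor a queue ended up with, using only the queue pointer. */
static inline const char *
ixgbe_rxq_burst_mode(const struct ixgbe_rx_queue *rxq)
{
	return rxq->rx_using_sse ? "vector (SSE)" : "scalar";
}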
/**
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
#endif
+ uint16_t rx_using_sse; /**< indicates that vector RX is in use */
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
+void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
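The new declaration's implementation, and the TX release rework below, both lean on the layout of the two sw_ring entry types. For reference, they look approximately like this in ixgbe_rxtx.h (comments are mine; treat the exact field lists as an assumption):

/* Scalar TX entry: tracks descriptor chaining per packet. */
struct ixgbe_tx_entry {
	struct rte_mbuf *mbuf;   /* mbuf held by this descriptor slot */
	uint16_t next_id;        /* index of next descriptor in ring */
	uint16_t last_id;        /* index of packet's last descriptor */
};

/* Vector TX entry: only the mbuf pointer. The vector PMD packs the same
 * sw_ring memory with these smaller elements, so indexing must cast the
 * base pointer first and step with the tx_entry_v stride. */
struct ixgbe_tx_entry_v {
	struct rte_mbuf *mbuf;   /* mbuf held by this descriptor slot */
};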
#ifdef RTE_IXGBE_INC_VECTOR
{
unsigned i;
struct ixgbe_tx_entry_v *txe;
- uint16_t nb_free, max_desc;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- if (txq->sw_ring != NULL) {
- /* release the used mbufs in sw_ring */
- nb_free = txq->nb_tx_free;
- max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- nb_free < max_desc && i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
- if (txe->mbuf != NULL)
- rte_pktmbuf_free_seg(txe->mbuf);
- }
- /* reset tx_entry */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
- txe->mbuf = NULL;
- }
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[i];
+ txe->mbuf = NULL;
}
}
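The (i + 1) & max_desc step relies on nb_tx_desc being a power of two: max_desc is then an all-ones mask and the index wraps with no modulo. A standalone illustration (values made up):

/* Standalone demo of the masked ring walk used above. With nb_desc = 8,
 * mask = 7: starting at slot 6 and stopping at tail = 2 visits
 * 6, 7, 0, 1 -- wrap-around without a division. */
#include <stdio.h>

int main(void)
{
	const unsigned mask = 8 - 1;	/* nb_desc - 1, nb_desc a power of two */
	const unsigned tail = 2;	/* example stop index */
	unsigned i;

	for (i = 6; i != tail; i = (i + 1) & mask)
		printf("visit slot %u\n", i);

	return 0;
}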
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
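The early return plus the rxrearm_nb = nb_rx_desc store make the release idempotent: the first call frees the window of valid mbufs between rx_tail and rxrearm_start, and any later call sees a fully unarmed ring and does nothing. A test-style sketch of that invariant (hypothetical function, assumes the driver headers; not part of the patch):

#include <assert.h>

/* Hypothetical check: releasing twice must be safe. */
static void
check_release_idempotent(struct ixgbe_rx_queue *rxq)
{
	ixgbe_rx_queue_release_mbufs_vec(rxq);
	assert(rxq->rxrearm_nb == rxq->nb_rx_desc);
	ixgbe_rx_queue_release_mbufs_vec(rxq);	/* hits the early return */
}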
+
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{