/* Free all mbufs for descriptors in rx queue */
static void
-ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
uint16_t i;
#endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */
}
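+/* Thin wrapper: dispatch to whichever mbuf release routine was
+ * registered for this queue (the function above is installed as the
+ * default at queue setup).
+ */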
+static void
+ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
+{
+ rxq->rx_rel_mbufs(rxq);
+}
+
/* turn on or off rx queue
 * @q_idx: queue index in pf scope
 * @on: turn on or off the queue
 */
/* Free all mbufs for descriptors in tx queue */
static void
-ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
{
uint16_t i;
}
}
}
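+/* Thin wrapper: dispatch to the mbuf release routine registered for
+ * this Tx queue.
+ */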
+static void
+ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+{
+ txq->tx_rel_mbufs(txq);
+}
static void
ice_reset_tx_queue(struct ice_tx_queue *txq)
ice_reset_rx_queue(rxq);
rxq->q_set = TRUE;
dev->data->rx_queues[queue_idx] = rxq;
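+ /* Register the default mbuf release routine for this Rx queue. */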
+ rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
ice_reset_tx_queue(txq);
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
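+ /* Likewise, register the default release routine for this Tx queue. */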
+ txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
return 0;
}
#define ICE_SUPPORT_CHAIN_NUM 5
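+/* Per-queue callback types for releasing all mbufs held by a queue;
+ * a handler is registered when the queue is set up.
+ */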
+typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
+typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
+
struct ice_rx_entry {
struct rte_mbuf *mbuf;
};
uint16_t max_pkt_len; /* Maximum packet length */
bool q_set; /* indicate if rx queue has been configured */
bool rx_deferred_start; /* don't start this queue in dev start */
+ ice_rx_release_mbufs_t rx_rel_mbufs; /* callback to free queue mbufs */
};
struct ice_tx_entry {
uint16_t tx_next_rs;
bool tx_deferred_start; /* don't start this queue in dev start */
bool q_set; /* indicate if tx queue has been configured */
+ ice_tx_release_mbufs_t tx_rel_mbufs; /* callback to free queue mbufs */
};
/* Offload features */