else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
return 0;
/* happy day case, full burst + no packets to be joined */
- const uint32_t *split_fl32 = (uint32_t *)split_flags;
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
if (rxq->pkt_first_seg == NULL &&
- split_fl32[0] == 0 && split_fl32[1] == 0 &&
- split_fl32[2] == 0 && split_fl32[3] == 0)
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
return nb_bufs;
/* reassemble any packets that need reassembly*/
* first buffer to free from S/W ring is at index
* tx_next_dd - (tx_rs_thresh-1)
*/
- txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
- (n - 1)];
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
tx_id = txq->tx_tail;
txdp = &txq->tx_ring[tx_id];
- txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
+ txep = &txq->sw_ring_v[tx_id];
txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
/* avoid reach the end of ring */
txdp = &(txq->tx_ring[tx_id]);
- txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
+ txep = &txq->sw_ring_v[tx_id];
}
tx_backlog_entry(txep, tx_pkts, nb_commit);
}
/*
 * NOTE(review): patch hunk ('-' = removed, '+' = added), not standalone C.
 * Renames the release helper with a _vec suffix and rewrites it to use the
 * typed sw_ring_v alias instead of casting sw_ring at every use.
 */
static void __attribute__((cold))
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
unsigned i;
struct ixgbe_tx_entry_v *txe;
- uint16_t nb_free, max_desc;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- if (txq->sw_ring != NULL) {
- /* release the used mbufs in sw_ring */
- nb_free = txq->nb_tx_free;
- max_desc = (uint16_t)(txq->nb_tx_desc - 1);
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- nb_free < max_desc && i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
- /*
- * Check for already freed packets.
- * Note: ixgbe_tx_free_bufs does not NULL after free,
- * so we actually have to check the reference count.
- */
- if (txe->mbuf != NULL &&
- rte_mbuf_refcnt_read(txe->mbuf) != 0)
- rte_pktmbuf_free_seg(txe->mbuf);
- }
+ /* Early out when there is no sw_ring or nothing is in flight
+ * (nb_tx_free == max_desc means every descriptor is free). */
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ /* NOTE(review): unlike the removed code there is no NULL /
+ * refcnt guard here — this assumes every entry in
+ * [tx_next_dd - (rs_thresh - 1), tx_tail) still holds a live
+ * mbuf. The deleted comment warned that ixgbe_tx_free_bufs
+ * does not NULL freed entries; confirm the reworked free path
+ * now clears them, otherwise this can double-free. */
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ /* everything in flight has been released: mark the ring fully free */
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
}
}
+/*
+ * Release every mbuf still held by a vector RX queue's software ring and
+ * reset the rearm bookkeeping so the queue can be torn down or restarted.
+ * Non-static (unlike the TX variant above): presumably called from the
+ * non-vector queue-release path — verify against the caller.
+ */
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ /* NOTE(review): mask-based index wrap assumes nb_rx_desc is a power
+ * of two — confirm the RX queue setup path enforces that. */
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ /* nothing to do: ring already gone, or every slot awaits rearm */
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ /* after freeing, the whole ring needs rearming */
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
/*
 * Free the over-allocated vector sw_ring. The '- 1' undoes the '+ 1'
 * overflow-slot offset applied in ixgbe_txq_vec_setup below, so rte_free
 * receives the pointer originally returned by the allocator.
 */
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
/* NOTE(review): this bare 'return;' makes everything below unreachable,
 * leaking sw_ring_v. It reads like the 'if (txq == NULL)' guard line was
 * lost from this hunk's context — restore the missing condition before
 * applying the patch. */
return;
if (txq->sw_ring != NULL) {
- rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
- txq->sw_ring = NULL;
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
}
}
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
- struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
uint16_t i;
/* Zero out HW ring memory */
}
/* Vector TX queue ops table: release_mbufs is repointed at the renamed
 * _vec variant; free_swring and reset keep their existing symbols. */
static const struct ixgbe_txq_ops vec_txq_ops = {
- .release_mbufs = ixgbe_tx_queue_release_mbufs,
+ .release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
};
int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
- if (txq->sw_ring == NULL)
+ if (txq->sw_ring_v == NULL)
return -1;
/* leave the first one for overflow */
- txq->sw_ring = (struct ixgbe_tx_entry *)
- ((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
+ txq->sw_ring_v = txq->sw_ring_v + 1;
txq->ops = &vec_txq_ops;
return 0;