 		(tx_next_use != tx_next_clean || tx_bd_ready < tx_bd_max)) {
 		mbuf = tx_bak_pkt->mbuf;
 		if (mbuf) {
-			mbuf->next = NULL;
-			rte_pktmbuf_free(mbuf);
+			rte_pktmbuf_free_seg(mbuf);
 			tx_bak_pkt->mbuf = NULL;
 		}
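The hunk above swaps a whole-chain free for a per-segment free: rte_pktmbuf_free() walks mbuf->next and returns every segment of the chain to its pool, which is why the old code first had to sever the chain with mbuf->next = NULL, while rte_pktmbuf_free_seg() returns only the one segment it is given. Since the Tx sw_ring stores one mbuf segment per entry (see tx_bak_pkt->mbuf = m_seg further down), per-segment freeing is sufficient. A minimal sketch of the idea; the helper name is illustrative, not from the patch:

#include <rte_mbuf.h>

/*
 * Illustrative helper (not part of the patch): each Tx sw_ring entry
 * holds exactly one mbuf segment, so it can be released with
 * rte_pktmbuf_free_seg() and no chain surgery is needed.
 */
static inline void
tx_entry_free_seg(struct rte_mbuf **entry)
{
	if (*entry != NULL) {
		rte_pktmbuf_free_seg(*entry);
		*entry = NULL;
	}
}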
 	struct rte_mbuf *new_pkt;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
-	struct rte_mbuf *temp;
 	uint32_t nb_hold = 0;
-	uint16_t tx_next_clean;
 	uint16_t tx_next_use;
 	uint16_t tx_bd_ready;
 	uint16_t tx_pkt_num;
 	if (tx_bd_ready == 0)
 		return 0;
-	tx_next_clean = txq->next_to_clean;
 	tx_next_use = txq->next_to_use;
 	tx_bd_max = txq->nb_tx_desc;
-	tx_bak_pkt = &txq->sw_ring[tx_next_clean];
-
 	tx_pkt_num = (tx_bd_ready < nb_pkts) ? tx_bd_ready : nb_pkts;
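With the tx_next_clean bookkeeping (and the sw_ring lookup based on it) removed above, the transmit path sizes a burst from tx_bd_ready alone; next_to_clean is left to the cleanup path. The clamp on the line above is simply a min(); a self-contained restatement, with an illustrative helper name:

#include <stdint.h>

/* Illustrative only: bound a burst by the number of ready Tx BDs. */
static inline uint16_t
tx_burst_budget(uint16_t tx_bd_ready, uint16_t nb_pkts)
{
	return tx_bd_ready < nb_pkts ? tx_bd_ready : nb_pkts;
}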
 	/* send packets */
 		i = 0;
 		do {
 			fill_desc(txq, tx_next_use, m_seg, (i == 0), 0);
-			temp = m_seg->next;
 			tx_bak_pkt->mbuf = m_seg;
-			m_seg = temp;
+			m_seg = m_seg->next;
 			tx_next_use++;
 			tx_bak_pkt++;
 			if (tx_next_use >= tx_bd_max) {
 	if (likely(nb_tx)) {
 		hns3_queue_xmit(txq, nb_hold);
-		txq->next_to_clean = tx_next_clean;
 		txq->tx_bd_ready = tx_bd_ready - nb_hold;
 	}
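After the doorbell write in hns3_queue_xmit(), only the ready-BD count needs updating; the next_to_clean store is dropped because that index now advances only when completed mbufs are recycled by the cleanup path shown in the first hunk. A sketch of the resulting end-of-burst bookkeeping, assuming the driver-internal struct hns3_tx_queue and hns3_queue_xmit() declarations are in scope; this is an illustration, not the patched function verbatim:

/*
 * Illustrative end-of-burst bookkeeping under this patch: ring the
 * doorbell for the BDs just posted and shrink the ready count;
 * next_to_clean is intentionally left untouched here.
 */
static inline void
tx_commit_burst(struct hns3_tx_queue *txq, uint32_t nb_hold,
		uint16_t tx_bd_ready)
{
	hns3_queue_xmit(txq, nb_hold);
	txq->tx_bd_ready = tx_bd_ready - nb_hold;
}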