+static void
+atl_xmit_cleanup(struct atl_tx_queue *txq)
+{
+ struct atl_tx_entry *sw_ring;
+ struct hw_atl_txd_s *txd;
+ int to_clean = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq != NULL) {
+ sw_ring = txq->sw_ring;
+ int head = txq->tx_head;
+ int cnt;
+
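+ /*
+ * First pass: walk the ring from head towards tail counting
+ * descriptors the hardware has marked done (DD bit set).
+ */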
+ for (cnt = head; ; ) {
+ txd = &txq->hw_ring[cnt];
+
+ if (txd->dd)
+ to_clean++;
+
+ cnt = (cnt + 1) % txq->nb_tx_desc;
+ if (cnt == txq->tx_tail)
+ break;
+ }
+
+ if (to_clean == 0)
+ return;
+
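+ /*
+ * Second pass: free the mbufs and reset the descriptors up to the
+ * last done one. Entries whose DD bit is clear are reclaimed too:
+ * writeback is only requested on the final descriptor of a packet
+ * (see atl_xmit_pkt), so earlier descriptors of a completed
+ * packet never set DD.
+ */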
+ while (to_clean) {
+ txd = &txq->hw_ring[head];
+
+ struct atl_tx_entry *tx_entry = &sw_ring[head];
+
+ if (tx_entry->mbuf) {
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = NULL;
+ }
+
+ if (txd->dd)
+ to_clean--;
+
+ txd->buf_addr = 0;
+ txd->flags = 0;
+
+ head = (head + 1) % txq->nb_tx_desc;
+ txq->tx_free++;
+ }
+
+ txq->tx_head = head;
+ }
+}
+
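+ /*
+ * Prepare a Tx context descriptor for TSO and/or VLAN insertion.
+ * Returns the command bits the packet's data descriptors must
+ * carry, or 0 if no context descriptor is needed.
+ */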
+static int
+atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
+{
+ uint32_t tx_cmd = 0;
+ uint64_t ol_flags = tx_pkt->ol_flags;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
+
+ tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+
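+ /*
+ * Hardware context command: 0x4 selects a TCP segmentation
+ * context, 0x2 marks the packet as IPv6.
+ */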
+ txc->cmd = 0x4;
+
+ if (ol_flags & PKT_TX_IPV6)
+ txc->cmd |= 0x2;
+
+ txc->l2_len = tx_pkt->l2_len;
+ txc->l3_len = tx_pkt->l3_len;
+ txc->l4_len = tx_pkt->l4_len;
+
+ txc->mss_len = tx_pkt->tso_segsz;
+ }
+
+ if (ol_flags & PKT_TX_VLAN) {
+ tx_cmd |= tx_desc_cmd_vlan;
+ txc->vlan_tag = tx_pkt->vlan_tci;
+ }
+
+ if (tx_cmd) {
+ txc->type = tx_desc_type_ctx;
+ txc->idx = 0;
+ }
+
+ return tx_cmd;
+}
+
+static inline void
+atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
+ uint32_t tx_cmd)
+{
+ txd->cmd |= tx_desc_cmd_fcs;
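+ /* IPv4 header csum requested */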
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+ /* L4 csum requested */
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= tx_cmd;
+}
+
+static inline void
+atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
+ struct rte_mbuf *tx_pkt)
+{
+ uint32_t pay_len = 0;
+ int tail = 0;
+ struct atl_tx_entry *tx_entry;
+ uint64_t buf_dma_addr;
+ struct rte_mbuf *m_seg;
+ union hw_atl_txc_s *txc = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ uint32_t tx_cmd = 0;
+ int desc_count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
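+ /*
+ * The slot at tx_tail is first treated as a context descriptor;
+ * if no offload context is needed, it is reused below as the
+ * packet's first data descriptor.
+ */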
+ tail = txq->tx_tail;
+
+ txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
+
+ txc->flags1 = 0U;
+ txc->flags2 = 0U;
+
+ tx_cmd = atl_tso_setup(tx_pkt, txc);
+
+ if (tx_cmd) {
+ /* We've consumed the first desc, adjust counters */
+ tail = (tail + 1) % txq->nb_tx_desc;
+ txq->tx_tail = tail;
+ txq->tx_free -= 1;
+
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ } else {
+ txd = (struct hw_atl_txd_s *)txc;
+ }
+
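+ /* Link the data descriptors to context descriptor 0 when one is used */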
+ txd->ct_en = !!tx_cmd;
+
+ txd->type = tx_desc_type_desc;
+
+ atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
+
+ if (tx_cmd)
+ txd->ct_idx = 0;
+
+ pay_len = tx_pkt->pkt_len;
+
+ txd->pay_len = pay_len;
+
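+ /* One data descriptor per mbuf segment */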
+ for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
+ if (desc_count > 0) {
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ }
+
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+ txd->type = tx_desc_type_desc;
+ txd->len = m_seg->data_len;
+ txd->pay_len = pay_len;
+
+ /* Store mbuf for freeing later */
+ tx_entry = &txq->sw_ring[tail];
+
+ if (tx_entry->mbuf)
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = m_seg;
+
+ tail = (tail + 1) % txq->nb_tx_desc;
+
+ desc_count++;
+ }
+
+ /* Last descriptor requires EOP and WB */
+ txd->eop = 1U;
+ txd->cmd |= tx_desc_cmd_wb;
+
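+ /* Advertise the new tail to the NIC so it fetches the descriptors */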
+ hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
+
+ txq->tx_tail = tail;
+
+ txq->tx_free -= desc_count;
+}