+int
+atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, true);
+}
+
+int
+atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ return atl_rx_enable_intr(eth_dev, queue_id, false);
+}
+
+uint16_t
+atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+static uint64_t
+atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
+ struct hw_atl_rxd_wb_s *rxd_wb)
+{
+ uint64_t mbuf_flags = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* IPv4 ? */
+ if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
+ /* IPv4 csum error ? */
+ if (rxd_wb->rx_stat & BIT(1))
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ } else {
+ mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ }
+
+ /* CSUM calculated ? */
+ if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
+ if (rxd_wb->rx_stat & BIT(2))
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ } else {
+ mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ }
+
+ return mbuf_flags;
+}
+
+static uint32_t
+atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
+{
+ uint32_t type = RTE_PTYPE_UNKNOWN;
+ uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
+ uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;
+
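+ /*
+ * pkt_type layout in the write-back descriptor, as decoded below:
+ * bits [1:0] select the L2/L3 type, bits [4:2] the L4 type and
+ * bit 5 flags a VLAN-tagged frame.
+ */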
+ switch (l2_l3_type) {
+ case 0:
+ type = RTE_PTYPE_L3_IPV4;
+ break;
+ case 1:
+ type = RTE_PTYPE_L3_IPV6;
+ break;
+ case 2:
+ type = RTE_PTYPE_L2_ETHER;
+ break;
+ case 3:
+ type = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ }
+
+ switch (l4_type) {
+ case 0:
+ type |= RTE_PTYPE_L4_TCP;
+ break;
+ case 1:
+ type |= RTE_PTYPE_L4_UDP;
+ break;
+ case 2:
+ type |= RTE_PTYPE_L4_SCTP;
+ break;
+ case 3:
+ type |= RTE_PTYPE_L4_ICMP;
+ break;
+ }
+
+ if (rxd_wb->pkt_type & BIT(5))
+ type |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ return type;
+}
+
+uint16_t
+atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ struct atl_adapter *adapter =
+ ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
+ struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
+ struct aq_hw_cfg_s *cfg =
+ ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
+ struct atl_rx_entry *sw_ring = rxq->sw_ring;
+
+ struct rte_mbuf *new_mbuf;
+ struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
+ struct atl_rx_entry *rx_entry;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = 0;
+ struct hw_atl_rxd_wb_s rxd_wb;
+ struct hw_atl_rxd_s *rxd = NULL;
+ uint16_t tail = rxq->rx_tail;
+ uint64_t dma_addr;
+ uint16_t pkt_len = 0;
+
+ while (nb_rx < nb_pkts) {
+ uint16_t eop_tail = tail;
+
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
+ rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
+
+ if (!rxd_wb.dd) { /* RxD is not done */
+ break;
+ }
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
+ "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id,
+ (unsigned int)tail, (unsigned int)rxd_wb.eop,
+ (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
+ rxd_wb.rss_hash, rxd_wb.rss_type);
+
+ /* Not the last descriptor of the packet: look ahead for its EOP */
+ if (!rxd_wb.eop) {
+ while (true) {
+ struct hw_atl_rxd_wb_s *eop_rxwbd;
+
+ eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
+ eop_rxwbd = (struct hw_atl_rxd_wb_s *)
+ &rxq->hw_ring[eop_tail];
+ if (!eop_rxwbd->dd) {
+ /* no EOP received yet */
+ eop_tail = tail;
+ break;
+ }
+ if (eop_rxwbd->dd && eop_rxwbd->eop)
+ break;
+ }
+ /* No EOP in ring */
+ if (eop_tail == tail)
+ break;
+ }
+ rx_mbuf_prev = NULL;
+ rx_mbuf_first = NULL;
+
+ /* Run through packet segments */
+ while (true) {
+ new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (new_mbuf == NULL) {
+ PMD_RX_LOG(ERR,
+ "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ dev->data->rx_mbuf_alloc_failed++;
+ adapter->sw_stats.rx_nombuf++;
+ goto err_stop;
+ }
+
+ nb_hold++;
+ rx_entry = &sw_ring[tail];
+
+ rx_mbuf = rx_entry->mbuf;
+ rx_entry->mbuf = new_mbuf;
+ dma_addr = rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(new_mbuf));
+
+ /* setup RX descriptor */
+ rxd->hdr_addr = 0;
+ rxd->buf_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
+ rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_prefetch1((char *)rx_mbuf->buf_addr +
+ rx_mbuf->data_off);
+ rx_mbuf->nb_segs = 0;
+ rx_mbuf->next = NULL;
+ rx_mbuf->pkt_len = pkt_len;
+ rx_mbuf->data_len = pkt_len;
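+ /*
+ * data_len per segment: the final (EOP) segment carries the tail
+ * remainder of pkt_len modulo the buffer size (a full buffer when
+ * it divides evenly); intermediate segments are full buffers.
+ */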
+ if (rxd_wb.eop) {
+ u16 remainder_len = pkt_len % rxq->buff_size;
+
+ if (!remainder_len)
+ remainder_len = rxq->buff_size;
+ rx_mbuf->data_len = remainder_len;
+ } else {
+ rx_mbuf->data_len = pkt_len > rxq->buff_size ?
+ rxq->buff_size : pkt_len;
+ }
+ rx_mbuf->port = rxq->port_id;
+
+ rx_mbuf->hash.rss = rxd_wb.rss_hash;
+
+ rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+ rx_mbuf->ol_flags =
+ atl_desc_to_offload_flags(rxq, &rxd_wb);
+
+ rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
+
+ if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
+ rx_mbuf->ol_flags |= PKT_RX_VLAN;
+ rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+ if (cfg->vlan_strip)
+ rx_mbuf->ol_flags |=
+ PKT_RX_VLAN_STRIPPED;
+ }
+
+ if (!rx_mbuf_first)
+ rx_mbuf_first = rx_mbuf;
+ rx_mbuf_first->nb_segs++;
+
+ if (rx_mbuf_prev)
+ rx_mbuf_prev->next = rx_mbuf;
+ rx_mbuf_prev = rx_mbuf;
+
+ tail = (tail + 1) % rxq->nb_rx_desc;
+ /* Prefetch the next mbuf; every fourth descriptor, also its sw_ring entry */
+ rte_prefetch0(sw_ring[tail].mbuf);
+ if ((tail & 0x3) == 0)
+ rte_prefetch0(&sw_ring[tail]);
+
+ /* filled mbuf_first */
+ if (rxd_wb.eop)
+ break;
+ rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
+ rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
+ }
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rx_mbuf_first;
+ adapter->sw_stats.q_ipackets[rxq->queue_id]++;
+ adapter->sw_stats.q_ibytes[rxq->queue_id] +=
+ rx_mbuf_first->pkt_len;
+
+ PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+ rx_mbuf_first->nb_segs,
+ rx_mbuf_first->pkt_len);
+ }
+
+err_stop:
+
+ rxq->rx_tail = tail;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
+ (unsigned int)tail, (unsigned int)nb_hold,
+ (unsigned int)nb_rx);
+ tail = (uint16_t)((tail == 0) ?
+ (rxq->nb_rx_desc - 1) : (tail - 1));
+
+ hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);
+
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+
+ return nb_rx;
+}
+
+static void
+atl_xmit_cleanup(struct atl_tx_queue *txq)
+{
+ struct atl_tx_entry *sw_ring;
+ struct hw_atl_txd_s *txd;
+ int to_clean = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (txq != NULL) {
+ sw_ring = txq->sw_ring;
+ int head = txq->tx_head;
+ int cnt;
+ int i;
+
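+ /*
+ * First pass: count how many descriptors between head and
+ * tail the hardware has completed (DD bit set).
+ */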
+ for (i = 0, cnt = head; ; i++) {
+ txd = &txq->hw_ring[cnt];
+
+ if (txd->dd)
+ to_clean++;
+
+ cnt = (cnt + 1) % txq->nb_tx_desc;
+ if (cnt == txq->tx_tail)
+ break;
+ }
+
+ if (to_clean == 0)
+ return;
+
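+ /*
+ * Second pass: free the transmitted mbufs, clear the
+ * descriptors and advance head past the completed range.
+ */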
+ while (to_clean) {
+ txd = &txq->hw_ring[head];
+
+ struct atl_tx_entry *tx_entry = &sw_ring[head];
+
+ if (tx_entry->mbuf) {
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = NULL;
+ }
+
+ if (txd->dd)
+ to_clean--;
+
+ txd->buf_addr = 0;
+ txd->flags = 0;
+
+ head = (head + 1) % txq->nb_tx_desc;
+ txq->tx_free++;
+ }
+
+ txq->tx_head = head;
+ }
+}
+
+static int
+atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
+{
+ uint32_t tx_cmd = 0;
+ uint64_t ol_flags = tx_pkt->ol_flags;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
+
+ tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+
+ txc->cmd = 0x4;
+
+ if (ol_flags & PKT_TX_IPV6)
+ txc->cmd |= 0x2;
+
+ txc->l2_len = tx_pkt->l2_len;
+ txc->l3_len = tx_pkt->l3_len;
+ txc->l4_len = tx_pkt->l4_len;
+
+ txc->mss_len = tx_pkt->tso_segsz;
+ }
+
+ if (ol_flags & PKT_TX_VLAN) {
+ tx_cmd |= tx_desc_cmd_vlan;
+ txc->vlan_tag = tx_pkt->vlan_tci;
+ }
+
+ if (tx_cmd) {
+ txc->type = tx_desc_type_ctx;
+ txc->idx = 0;
+ }
+
+ return tx_cmd;
+}
+
+static inline void
+atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
+ uint32_t tx_cmd)
+{
+ txd->cmd |= tx_desc_cmd_fcs;
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+ /* L4 csum requested */
+ txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+ txd->cmd |= tx_cmd;
+}
+
+static inline void
+atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
+ struct rte_mbuf *tx_pkt)
+{
+ struct atl_adapter *adapter =
+ ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
+ uint32_t pay_len = 0;
+ int tail = 0;
+ struct atl_tx_entry *tx_entry;
+ uint64_t buf_dma_addr;
+ struct rte_mbuf *m_seg;
+ union hw_atl_txc_s *txc = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ u32 tx_cmd = 0U;
+ int desc_count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ tail = txq->tx_tail;
+
+ txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
+
+ txc->flags1 = 0U;
+ txc->flags2 = 0U;
+
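+ /*
+ * The slot at the current tail is first prepared as a context
+ * descriptor; if atl_tso_setup() requests no TSO/VLAN offload
+ * (tx_cmd == 0), the same slot is reused below as a plain data
+ * descriptor instead of consuming an extra one.
+ */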
+ tx_cmd = atl_tso_setup(tx_pkt, txc);
+
+ if (tx_cmd) {
+ /* We've consumed the first desc, adjust counters */
+ tail = (tail + 1) % txq->nb_tx_desc;
+ txq->tx_tail = tail;
+ txq->tx_free -= 1;
+
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ } else {
+ txd = (struct hw_atl_txd_s *)txc;
+ }
+
+ txd->ct_en = !!tx_cmd;
+
+ txd->type = tx_desc_type_desc;
+
+ atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
+
+ if (tx_cmd)
+ txd->ct_idx = 0;
+
+ pay_len = tx_pkt->pkt_len;
+
+ txd->pay_len = pay_len;
+
+ for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
+ if (desc_count > 0) {
+ txd = &txq->hw_ring[tail];
+ txd->flags = 0U;
+ }
+
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+ txd->type = tx_desc_type_desc;
+ txd->len = m_seg->data_len;
+ txd->pay_len = pay_len;
+
+ /* Store mbuf for freeing later */
+ tx_entry = &txq->sw_ring[tail];
+
+ if (tx_entry->mbuf)
+ rte_pktmbuf_free_seg(tx_entry->mbuf);
+ tx_entry->mbuf = m_seg;
+
+ tail = (tail + 1) % txq->nb_tx_desc;
+
+ desc_count++;
+ }
+
+ /* Last descriptor requires EOP and WB */
+ txd->eop = 1U;
+ txd->cmd |= tx_desc_cmd_wb;
+
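+ /* Publish the new tail to the hardware for this Tx ring */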
+ hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
+
+ txq->tx_tail = tail;
+
+ txq->tx_free -= desc_count;
+
+ adapter->sw_stats.q_opackets[txq->queue_id]++;
+ adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
+}
+
+uint16_t
+atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev = NULL;
+ struct aq_hw_s *hw = NULL;
+ struct atl_tx_queue *txq = tx_queue;
+ struct rte_mbuf *tx_pkt;
+ uint16_t nb_tx;
+
+ dev = &rte_eth_devices[txq->port_id];
+ hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_TX_LOG(DEBUG,
+ "port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
+ txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
+ txq->tx_tail, txq->tx_head);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ tx_pkt = *tx_pkts++;
+
+ /* Clean Tx queue if needed */
+ if (txq->tx_free < txq->tx_free_thresh)
+ atl_xmit_cleanup(txq);
+
+ /* Check if we have enough free descriptors */
+ if (txq->tx_free < tx_pkt->nb_segs)
+ break;
+
+ /* check mbuf is valid */
+ if ((tx_pkt->nb_segs == 0) ||
+ ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
+ break;
+
+ /* Send the packet */
+ atl_xmit_pkt(hw, txq, tx_pkt);
+ }
+
+ PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);
+
+ return nb_tx;
+}
+