ethdev: add namespace
[dpdk.git] / drivers / net / atlantic / atl_rxtx.c
index 3614156..2ff4268 100644 (file)
@@ -3,7 +3,7 @@
  */
 
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 
 #include "atl_ethdev.h"
@@ -21,6 +21,8 @@
 
 #define ATL_TX_OFFLOAD_MASK (                           \
        PKT_TX_VLAN |                                    \
+       PKT_TX_IPV6 |                                    \
+       PKT_TX_IPV4 |                                    \
        PKT_TX_IP_CKSUM |                                \
        PKT_TX_L4_MASK |                                 \
        PKT_TX_TCP_SEG)
@@ -123,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[rx_queue_id] != NULL) {
-               atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+               atl_rx_queue_release(dev, rx_queue_id);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
@@ -143,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
        rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               DEV_RX_OFFLOAD_IPV4_CKSUM;
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
        rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+               (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
        /* allocate memory for the software ring */
@@ -245,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[tx_queue_id] != NULL) {
-               atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+               atl_tx_queue_release(dev, tx_queue_id);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
@@ -336,6 +338,7 @@ int
 atl_rx_init(struct rte_eth_dev *eth_dev)
 {
        struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
        struct atl_rx_queue *rxq;
        uint64_t base_addr = 0;
        int i = 0;
@@ -379,6 +382,10 @@ atl_rx_init(struct rte_eth_dev *eth_dev)
                }
        }
 
+       for (i = rss_params->indirection_table_size; i--;)
+               rss_params->indirection_table[i] = i %
+                       eth_dev->data->nb_rx_queues;
+       hw_atl_b0_hw_rss_set(hw, rss_params);
        return err;
 }
 
@@ -491,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-       if (rx_queue != NULL) {
-               struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (rxq != NULL) {
                atl_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
                rte_free(rxq);
@@ -562,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-       if (tx_queue != NULL) {
-               struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (txq != NULL) {
                atl_tx_queue_release_mbufs(txq);
                rte_free(txq->sw_ring);
                rte_free(txq);
@@ -583,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               atl_rx_queue_release(dev->data->rx_queues[i]);
+               atl_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = 0;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               atl_tx_queue_release(dev->data->tx_queues[i]);
+               atl_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = 0;
        }
        dev->data->nb_tx_queues = 0;
@@ -651,6 +658,104 @@ atl_stop_queues(struct rte_eth_dev *dev)
        return 0;
 }
 
+void
+atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                struct rte_eth_rxq_info *qinfo)
+{
+       struct atl_rx_queue *rxq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+}
+
+void
+atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+                struct rte_eth_txq_info *qinfo)
+{
+       struct atl_tx_queue *txq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+}
+
+/* Return Rx queue avail count */
+
+uint32_t
+atl_rx_queue_count(void *rx_queue)
+{
+       struct atl_rx_queue *rxq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       rxq = rx_queue;
+
+       if (rxq == NULL)
+               return 0;
+
+       return rxq->nb_rx_desc - rxq->nb_rx_hold;
+}
+
+int
+atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+       struct atl_rx_queue *rxq = rx_queue;
+       struct hw_atl_rxd_wb_s *rxd;
+       uint32_t idx;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (unlikely(offset >= rxq->nb_rx_desc))
+               return -EINVAL;
+
+       if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+               return RTE_ETH_RX_DESC_UNAVAIL;
+
+       idx = rxq->rx_tail + offset;
+
+       if (idx >= rxq->nb_rx_desc)
+               idx -= rxq->nb_rx_desc;
+
+       rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];
+
+       if (rxd->dd)
+               return RTE_ETH_RX_DESC_DONE;
+
+       return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct atl_tx_queue *txq = tx_queue;
+       struct hw_atl_txd_s *txd;
+       uint32_t idx;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
+       idx = txq->tx_tail + offset;
+
+       if (idx >= txq->nb_tx_desc)
+               idx -= txq->nb_tx_desc;
+
+       txd = &txq->hw_ring[idx];
+
+       if (txd->dd)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 static int
 atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
 {
@@ -702,25 +807,25 @@ atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                ol_flags = m->ol_flags;
 
                if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
                if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
@@ -811,6 +916,8 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct atl_adapter *adapter =
                ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
        struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
+       struct aq_hw_cfg_s *cfg =
+               ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
        struct atl_rx_entry *sw_ring = rxq->sw_ring;
 
        struct rte_mbuf *new_mbuf;
@@ -834,7 +941,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        break;
                }
 
-               PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
                           "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
                           (unsigned int)rxq->port_id,
                           (unsigned int)rxq->queue_id,
@@ -869,12 +976,13 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                while (true) {
                        new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                        if (new_mbuf == NULL) {
-                               PMD_RX_LOG(ERR,
+                               PMD_RX_LOG(DEBUG,
                                   "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned int)rxq->port_id,
                                   (unsigned int)rxq->queue_id);
                                dev->data->rx_mbuf_alloc_failed++;
-                                               goto err_stop;
+                               adapter->sw_stats.rx_nombuf++;
+                               goto err_stop;
                        }
 
                        nb_hold++;
@@ -927,8 +1035,18 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                        rx_mbuf->ol_flags =
                                atl_desc_to_offload_flags(rxq, &rxd_wb);
+
                        rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
 
+                       if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
+                               rx_mbuf->ol_flags |= PKT_RX_VLAN;
+                               rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+                               if (cfg->vlan_strip)
+                                       rx_mbuf->ol_flags |=
+                                               PKT_RX_VLAN_STRIPPED;
+                       }
+
                        if (!rx_mbuf_first)
                                rx_mbuf_first = rx_mbuf;
                        rx_mbuf_first->nb_segs++;
@@ -957,8 +1075,11 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 * of returned packets.
                 */
                rx_pkts[nb_rx++] = rx_mbuf_first;
+               adapter->sw_stats.q_ipackets[rxq->queue_id]++;
+               adapter->sw_stats.q_ibytes[rxq->queue_id] +=
+                       rx_mbuf_first->pkt_len;
 
-               PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+               PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
                        rx_mbuf_first->nb_segs,
                        rx_mbuf_first->pkt_len);
        }
@@ -978,7 +1099,7 @@ err_stop:
         */
        nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
                        "nb_hold=%u nb_rx=%u",
                        (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
                        (unsigned int)tail, (unsigned int)nb_hold,
@@ -1003,8 +1124,6 @@ atl_xmit_cleanup(struct atl_tx_queue *txq)
        struct hw_atl_txd_s *txd;
        int to_clean = 0;
 
-       PMD_INIT_FUNC_TRACE();
-
        if (txq != NULL) {
                sw_ring = txq->sw_ring;
                int head = txq->tx_head;
@@ -1055,11 +1174,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
        uint32_t tx_cmd = 0;
        uint64_t ol_flags = tx_pkt->ol_flags;
 
-       PMD_INIT_FUNC_TRACE();
-
        if (ol_flags & PKT_TX_TCP_SEG) {
-               PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
-
                tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
 
                txc->cmd = 0x4;
@@ -1102,6 +1217,8 @@ static inline void
 atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
             struct rte_mbuf *tx_pkt)
 {
+       struct atl_adapter *adapter =
+               ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
        uint32_t pay_len = 0;
        int tail = 0;
        struct atl_tx_entry *tx_entry;
@@ -1112,8 +1229,6 @@ atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
        u32 tx_cmd = 0U;
        int desc_count = 0;
 
-       PMD_INIT_FUNC_TRACE();
-
        tail = txq->tx_tail;
 
        txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
@@ -1182,6 +1297,9 @@ atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
        txq->tx_tail = tail;
 
        txq->tx_free -= desc_count;
+
+       adapter->sw_stats.q_opackets[txq->queue_id]++;
+       adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
 }
 
 uint16_t
@@ -1225,4 +1343,3 @@ atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
        return nb_tx;
 }
-