net/nfp: move datapath functions to their own file
author    Heinrich Kuhn <heinrich.kuhn@netronome.com>
          Thu, 29 Jul 2021 13:47:06 +0000 (15:47 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 17 Aug 2021 16:26:57 +0000 (18:26 +0200)
Create a new rxtx file and move the Rx/Tx functions to it. This commit
also moves the shared helpers these functions rely on into the
nfp_net_pmd.h header so they remain visible to both files.
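
As an illustration only (a minimal sketch, not part of this patch:
nfp_net_set_burst_fns is a hypothetical helper name, and the exact
header contents live in drivers/net/nfp/nfp_rxtx.h), the burst
routines and freelist setup become non-static so nfp_net.c can keep
registering them with the ethdev:

    #include <ethdev_driver.h>   /* struct rte_eth_dev, struct rte_mbuf */

    /* declared in nfp_rxtx.h, defined in nfp_rxtx.c */
    uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts);
    uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
    int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);

    /* nfp_net.c keeps wiring the relocated handlers into the ethdev */
    static void
    nfp_net_set_burst_fns(struct rte_eth_dev *eth_dev)
    {
            eth_dev->rx_pkt_burst = nfp_net_recv_pkts;
            eth_dev->tx_pkt_burst = nfp_net_xmit_pkts;
    }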

Signed-off-by: Heinrich Kuhn <heinrich.kuhn@netronome.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
drivers/net/nfp/meson.build
drivers/net/nfp/nfp_net.c
drivers/net/nfp/nfp_net_pmd.h
drivers/net/nfp/nfp_rxtx.c [new file with mode: 0644]
drivers/net/nfp/nfp_rxtx.h

diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index b51e2e5..1b289e2 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -19,4 +19,5 @@ sources = files(
         'nfpcore/nfp_nsp_eth.c',
         'nfpcore/nfp_hwinfo.c',
         'nfp_net.c',
+        'nfp_rxtx.c',
 )
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 29eb8c8..de0de80 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -68,29 +68,11 @@ static int nfp_init_phyports(struct nfp_pf_dev *pf_dev);
 static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
 static int nfp_net_promisc_enable(struct rte_eth_dev *dev);
 static int nfp_net_promisc_disable(struct rte_eth_dev *dev);
-static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
-static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
-                                      uint16_t queue_idx);
-static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-                                 uint16_t nb_pkts);
-static void nfp_net_rx_queue_release(void *rxq);
-static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                                 uint16_t nb_desc, unsigned int socket_id,
-                                 const struct rte_eth_rxconf *rx_conf,
-                                 struct rte_mempool *mp);
-static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
-static void nfp_net_tx_queue_release(void *txq);
-static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                                 uint16_t nb_desc, unsigned int socket_id,
-                                 const struct rte_eth_txconf *tx_conf);
 static int nfp_net_start(struct rte_eth_dev *dev);
 static int nfp_net_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats);
 static int nfp_net_stats_reset(struct rte_eth_dev *dev);
 static int nfp_net_stop(struct rte_eth_dev *dev);
-static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-                                 uint16_t nb_pkts);
-
 static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
 static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_conf *rss_conf);
@@ -108,184 +90,6 @@ static int nfp_fw_setup(struct rte_pci_device *dev,
                        struct nfp_eth_table *nfp_eth_table,
                        struct nfp_hwinfo *hwinfo);
 
-
-/* The offset of the queue controller queues in the PCIe Target */
-#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
-
-/* Maximum value which can be added to a queue with one transaction */
-#define NFP_QCP_MAX_ADD        0x7f
-
-#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
-
-/* nfp_qcp_ptr - Read or Write Pointer of a queue */
-enum nfp_qcp_ptr {
-       NFP_QCP_READ_PTR = 0,
-       NFP_QCP_WRITE_PTR
-};
-
-/*
- * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
- * @q: Base address for queue structure
- * @ptr: Add to the Read or Write pointer
- * @val: Value to add to the queue pointer
- *
- * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
- */
-static inline void
-nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
-{
-       uint32_t off;
-
-       if (ptr == NFP_QCP_READ_PTR)
-               off = NFP_QCP_QUEUE_ADD_RPTR;
-       else
-               off = NFP_QCP_QUEUE_ADD_WPTR;
-
-       while (val > NFP_QCP_MAX_ADD) {
-               nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
-               val -= NFP_QCP_MAX_ADD;
-       }
-
-       nn_writel(rte_cpu_to_le_32(val), q + off);
-}
-
-/*
- * nfp_qcp_read - Read the current Read/Write pointer value for a queue
- * @q:  Base address for queue structure
- * @ptr: Read or Write pointer
- */
-static inline uint32_t
-nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
-{
-       uint32_t off;
-       uint32_t val;
-
-       if (ptr == NFP_QCP_READ_PTR)
-               off = NFP_QCP_QUEUE_STS_LO;
-       else
-               off = NFP_QCP_QUEUE_STS_HI;
-
-       val = rte_cpu_to_le_32(nn_readl(q + off));
-
-       if (ptr == NFP_QCP_READ_PTR)
-               return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
-       else
-               return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
-}
-
-/*
- * Functions to read/write from/to Config BAR
- * Performs any endian conversion necessary.
- */
-static inline uint8_t
-nn_cfg_readb(struct nfp_net_hw *hw, int off)
-{
-       return nn_readb(hw->ctrl_bar + off);
-}
-
-static inline void
-nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
-{
-       nn_writeb(val, hw->ctrl_bar + off);
-}
-
-static inline uint32_t
-nn_cfg_readl(struct nfp_net_hw *hw, int off)
-{
-       return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
-}
-
-static inline void
-nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
-{
-       nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
-}
-
-static inline uint64_t
-nn_cfg_readq(struct nfp_net_hw *hw, int off)
-{
-       return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
-}
-
-static inline void
-nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
-{
-       nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
-}
-
-static void
-nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
-{
-       unsigned i;
-
-       if (rxq->rxbufs == NULL)
-               return;
-
-       for (i = 0; i < rxq->rx_count; i++) {
-               if (rxq->rxbufs[i].mbuf) {
-                       rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
-                       rxq->rxbufs[i].mbuf = NULL;
-               }
-       }
-}
-
-static void
-nfp_net_rx_queue_release(void *rx_queue)
-{
-       struct nfp_net_rxq *rxq = rx_queue;
-
-       if (rxq) {
-               nfp_net_rx_queue_release_mbufs(rxq);
-               rte_free(rxq->rxbufs);
-               rte_free(rxq);
-       }
-}
-
-static void
-nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
-{
-       nfp_net_rx_queue_release_mbufs(rxq);
-       rxq->rd_p = 0;
-       rxq->nb_rx_hold = 0;
-}
-
-static void
-nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
-{
-       unsigned i;
-
-       if (txq->txbufs == NULL)
-               return;
-
-       for (i = 0; i < txq->tx_count; i++) {
-               if (txq->txbufs[i].mbuf) {
-                       rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
-                       txq->txbufs[i].mbuf = NULL;
-               }
-       }
-}
-
-static void
-nfp_net_tx_queue_release(void *tx_queue)
-{
-       struct nfp_net_txq *txq = tx_queue;
-
-       if (txq) {
-               nfp_net_tx_queue_release_mbufs(txq);
-               rte_free(txq->txbufs);
-               rte_free(txq);
-       }
-}
-
-static void
-nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
-{
-       nfp_net_tx_queue_release_mbufs(txq);
-       txq->wr_p = 0;
-       txq->rd_p = 0;
-}
-
 static int
 __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
 {
@@ -463,18 +267,6 @@ nfp_net_disable_queues(struct rte_eth_dev *dev)
        hw->ctrl = new_ctrl;
 }
 
-static int
-nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
-{
-       int i;
-
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
-                       return -1;
-       }
-       return 0;
-}
-
 static void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
@@ -1351,44 +1143,6 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
        return NULL;
 }
 
-static uint32_t
-nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
-{
-       struct nfp_net_rxq *rxq;
-       struct nfp_net_rx_desc *rxds;
-       uint32_t idx;
-       uint32_t count;
-
-       rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
-
-       idx = rxq->rd_p;
-
-       count = 0;
-
-       /*
-        * Other PMDs are just checking the DD bit in intervals of 4
-        * descriptors and counting all four if the first has the DD
-        * bit on. Of course, this is not accurate but can be good for
-        * performance. But ideally that should be done in descriptors
-        * chunks belonging to the same cache line
-        */
-
-       while (count < rxq->rx_count) {
-               rxds = &rxq->rxds[idx];
-               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
-                       break;
-
-               count++;
-               idx++;
-
-               /* Wrapping? */
-               if ((idx) == rxq->rx_count)
-                       idx = 0;
-       }
-
-       return count;
-}
-
 static int
 nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -1570,850 +1324,6 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        return 0;
 }
 
-static int
-nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
-                      uint16_t queue_idx, uint16_t nb_desc,
-                      unsigned int socket_id,
-                      const struct rte_eth_rxconf *rx_conf,
-                      struct rte_mempool *mp)
-{
-       const struct rte_memzone *tz;
-       struct nfp_net_rxq *rxq;
-       struct nfp_net_hw *hw;
-       uint32_t rx_desc_sz;
-
-       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       PMD_INIT_FUNC_TRACE();
-
-       /* Validating number of descriptors */
-       rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
-       if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
-           nb_desc > NFP_NET_MAX_RX_DESC ||
-           nb_desc < NFP_NET_MIN_RX_DESC) {
-               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
-               return -EINVAL;
-       }
-
-       /*
-        * Free memory prior to re-allocation if needed. This is the case after
-        * calling nfp_net_stop
-        */
-       if (dev->data->rx_queues[queue_idx]) {
-               nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
-       }
-
-       /* Allocating rx queue data structure */
-       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
-                                RTE_CACHE_LINE_SIZE, socket_id);
-       if (rxq == NULL)
-               return -ENOMEM;
-
-       /* Hw queues mapping based on firmware configuration */
-       rxq->qidx = queue_idx;
-       rxq->fl_qcidx = queue_idx * hw->stride_rx;
-       rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
-       rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
-       rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
-
-       /*
-        * Tracking mbuf size for detecting a potential mbuf overflow due to
-        * RX offset
-        */
-       rxq->mem_pool = mp;
-       rxq->mbuf_size = rxq->mem_pool->elt_size;
-       rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
-       hw->flbufsz = rxq->mbuf_size;
-
-       rxq->rx_count = nb_desc;
-       rxq->port_id = dev->data->port_id;
-       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
-       rxq->drop_en = rx_conf->rx_drop_en;
-
-       /*
-        * Allocate RX ring hardware descriptors. A memzone large enough to
-        * handle the maximum ring size is allocated in order to allow for
-        * resizing in later calls to the queue setup function.
-        */
-       tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
-                                  sizeof(struct nfp_net_rx_desc) *
-                                  NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
-                                  socket_id);
-
-       if (tz == NULL) {
-               PMD_DRV_LOG(ERR, "Error allocating rx dma");
-               nfp_net_rx_queue_release(rxq);
-               return -ENOMEM;
-       }
-
-       /* Saving physical and virtual addresses for the RX ring */
-       rxq->dma = (uint64_t)tz->iova;
-       rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
-
-       /* mbuf pointers array for referencing mbufs linked to RX descriptors */
-       rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
-                                        sizeof(*rxq->rxbufs) * nb_desc,
-                                        RTE_CACHE_LINE_SIZE, socket_id);
-       if (rxq->rxbufs == NULL) {
-               nfp_net_rx_queue_release(rxq);
-               return -ENOMEM;
-       }
-
-       PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-                  rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
-
-       nfp_net_reset_rx_queue(rxq);
-
-       dev->data->rx_queues[queue_idx] = rxq;
-       rxq->hw = hw;
-
-       /*
-        * Telling the HW about the physical address of the RX ring and number
-        * of descriptors in log2 format
-        */
-       nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
-
-       return 0;
-}
-
-static int
-nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
-{
-       struct nfp_net_rx_buff *rxe = rxq->rxbufs;
-       uint64_t dma_addr;
-       unsigned i;
-
-       PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
-                  rxq->rx_count);
-
-       for (i = 0; i < rxq->rx_count; i++) {
-               struct nfp_net_rx_desc *rxd;
-               struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
-
-               if (mbuf == NULL) {
-                       PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
-                               (unsigned)rxq->qidx);
-                       return -ENOMEM;
-               }
-
-               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
-
-               rxd = &rxq->rxds[i];
-               rxd->fld.dd = 0;
-               rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
-               rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
-               rxe[i].mbuf = mbuf;
-               PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
-       }
-
-       /* Make sure all writes are flushed before telling the hardware */
-       rte_wmb();
-
-       /* Not advertising the whole ring as the firmware gets confused if so */
-       PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
-                  rxq->rx_count - 1);
-
-       nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
-
-       return 0;
-}
-
-static int
-nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                      uint16_t nb_desc, unsigned int socket_id,
-                      const struct rte_eth_txconf *tx_conf)
-{
-       const struct rte_memzone *tz;
-       struct nfp_net_txq *txq;
-       uint16_t tx_free_thresh;
-       struct nfp_net_hw *hw;
-       uint32_t tx_desc_sz;
-
-       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       PMD_INIT_FUNC_TRACE();
-
-       /* Validating number of descriptors */
-       tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
-       if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
-           nb_desc > NFP_NET_MAX_TX_DESC ||
-           nb_desc < NFP_NET_MIN_TX_DESC) {
-               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
-               return -EINVAL;
-       }
-
-       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
-                                   tx_conf->tx_free_thresh :
-                                   DEFAULT_TX_FREE_THRESH);
-
-       if (tx_free_thresh > (nb_desc)) {
-               PMD_DRV_LOG(ERR,
-                       "tx_free_thresh must be less than the number of TX "
-                       "descriptors. (tx_free_thresh=%u port=%d "
-                       "queue=%d)", (unsigned int)tx_free_thresh,
-                       dev->data->port_id, (int)queue_idx);
-               return -(EINVAL);
-       }
-
-       /*
-        * Free memory prior to re-allocation if needed. This is the case after
-        * calling nfp_net_stop
-        */
-       if (dev->data->tx_queues[queue_idx]) {
-               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
-                          queue_idx);
-               nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
-       }
-
-       /* Allocating tx queue data structure */
-       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
-                                RTE_CACHE_LINE_SIZE, socket_id);
-       if (txq == NULL) {
-               PMD_DRV_LOG(ERR, "Error allocating tx dma");
-               return -ENOMEM;
-       }
-
-       /*
-        * Allocate TX ring hardware descriptors. A memzone large enough to
-        * handle the maximum ring size is allocated in order to allow for
-        * resizing in later calls to the queue setup function.
-        */
-       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                  sizeof(struct nfp_net_tx_desc) *
-                                  NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
-                                  socket_id);
-       if (tz == NULL) {
-               PMD_DRV_LOG(ERR, "Error allocating tx dma");
-               nfp_net_tx_queue_release(txq);
-               return -ENOMEM;
-       }
-
-       txq->tx_count = nb_desc;
-       txq->tx_free_thresh = tx_free_thresh;
-       txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
-       txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
-       txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
-
-       /* queue mapping based on firmware configuration */
-       txq->qidx = queue_idx;
-       txq->tx_qcidx = queue_idx * hw->stride_tx;
-       txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
-
-       txq->port_id = dev->data->port_id;
-
-       /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->iova;
-       txq->txds = (struct nfp_net_tx_desc *)tz->addr;
-
-       /* mbuf pointers array for referencing mbufs linked to TX descriptors */
-       txq->txbufs = rte_zmalloc_socket("txq->txbufs",
-                                        sizeof(*txq->txbufs) * nb_desc,
-                                        RTE_CACHE_LINE_SIZE, socket_id);
-       if (txq->txbufs == NULL) {
-               nfp_net_tx_queue_release(txq);
-               return -ENOMEM;
-       }
-       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-                  txq->txbufs, txq->txds, (unsigned long int)txq->dma);
-
-       nfp_net_reset_tx_queue(txq);
-
-       dev->data->tx_queues[queue_idx] = txq;
-       txq->hw = hw;
-
-       /*
-        * Telling the HW about the physical address of the TX ring and number
-        * of descriptors in log2 format
-        */
-       nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
-
-       return 0;
-}
-
-/* nfp_net_tx_tso - Set TX descriptor for TSO */
-static inline void
-nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
-              struct rte_mbuf *mb)
-{
-       uint64_t ol_flags;
-       struct nfp_net_hw *hw = txq->hw;
-
-       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
-               goto clean_txd;
-
-       ol_flags = mb->ol_flags;
-
-       if (!(ol_flags & PKT_TX_TCP_SEG))
-               goto clean_txd;
-
-       txd->l3_offset = mb->l2_len;
-       txd->l4_offset = mb->l2_len + mb->l3_len;
-       txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
-       txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
-       txd->flags = PCIE_DESC_TX_LSO;
-       return;
-
-clean_txd:
-       txd->flags = 0;
-       txd->l3_offset = 0;
-       txd->l4_offset = 0;
-       txd->lso_hdrlen = 0;
-       txd->mss = 0;
-}
-
-/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
-static inline void
-nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
-                struct rte_mbuf *mb)
-{
-       uint64_t ol_flags;
-       struct nfp_net_hw *hw = txq->hw;
-
-       if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
-               return;
-
-       ol_flags = mb->ol_flags;
-
-       /* IPv6 does not need checksum */
-       if (ol_flags & PKT_TX_IP_CKSUM)
-               txd->flags |= PCIE_DESC_TX_IP4_CSUM;
-
-       switch (ol_flags & PKT_TX_L4_MASK) {
-       case PKT_TX_UDP_CKSUM:
-               txd->flags |= PCIE_DESC_TX_UDP_CSUM;
-               break;
-       case PKT_TX_TCP_CKSUM:
-               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
-               break;
-       }
-
-       if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
-               txd->flags |= PCIE_DESC_TX_CSUM;
-}
-
-/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
-static inline void
-nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
-                struct rte_mbuf *mb)
-{
-       struct nfp_net_hw *hw = rxq->hw;
-
-       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
-               return;
-
-       /* If IPv4 and IP checksum error, fail */
-       if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
-           !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
-               mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-       else
-               mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-
-       /* If neither UDP nor TCP return */
-       if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
-           !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
-               return;
-
-       if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
-               mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-       else
-               mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-}
-
-#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
-#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
-
-#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
-
-/*
- * nfp_net_set_hash - Set mbuf hash data
- *
- * The RSS hash and hash-type are pre-pended to the packet data.
- * Extract and decode it and set the mbuf fields.
- */
-static inline void
-nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
-                struct rte_mbuf *mbuf)
-{
-       struct nfp_net_hw *hw = rxq->hw;
-       uint8_t *meta_offset;
-       uint32_t meta_info;
-       uint32_t hash = 0;
-       uint32_t hash_type = 0;
-
-       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
-               return;
-
-       /* this is true for new firmwares */
-       if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
-           (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
-            NFP_DESC_META_LEN(rxd))) {
-               /*
-                * new metadata api:
-                * <----  32 bit  ----->
-                * m    field type word
-                * e     data field #2
-                * t     data field #1
-                * a     data field #0
-                * ====================
-                *    packet data
-                *
-                * Field type word contains up to 8 4bit field types
-                * A 4bit field type refers to a data field word
-                * A data field word can have several 4bit field types
-                */
-               meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
-               meta_offset -= NFP_DESC_META_LEN(rxd);
-               meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
-               meta_offset += 4;
-               /* NFP PMD just supports metadata for hashing */
-               switch (meta_info & NFP_NET_META_FIELD_MASK) {
-               case NFP_NET_META_HASH:
-                       /* next field type is about the hash type */
-                       meta_info >>= NFP_NET_META_FIELD_SIZE;
-                       /* hash value is in the data field */
-                       hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
-                       hash_type = meta_info & NFP_NET_META_FIELD_MASK;
-                       break;
-               default:
-                       /* Unsupported metadata can be a performance issue */
-                       return;
-               }
-       } else {
-               if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
-                       return;
-
-               hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
-               hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
-       }
-
-       mbuf->hash.rss = hash;
-       mbuf->ol_flags |= PKT_RX_RSS_HASH;
-
-       switch (hash_type) {
-       case NFP_NET_RSS_IPV4:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
-               break;
-       case NFP_NET_RSS_IPV6:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
-               break;
-       case NFP_NET_RSS_IPV6_EX:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
-               break;
-       case NFP_NET_RSS_IPV4_TCP:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
-               break;
-       case NFP_NET_RSS_IPV6_TCP:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
-               break;
-       case NFP_NET_RSS_IPV4_UDP:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
-               break;
-       case NFP_NET_RSS_IPV6_UDP:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
-               break;
-       default:
-               mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
-       }
-}
-
-static inline void
-nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
-{
-       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
-}
-
-#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
-
-/*
- * RX path design:
- *
- * There are some decisions to take:
- * 1) How to check DD RX descriptors bit
- * 2) How and when to allocate new mbufs
- *
- * Current implementation checks just one single DD bit each loop. As each
- * descriptor is 8 bytes, it is likely a good idea to check descriptors in
- * a single cache line instead. Tests with this change have not shown any
- * performance improvement but it requires further investigation. For example,
- * depending on which descriptor is next, the number of descriptors could be
- * less than 8 for just checking those in the same cache line. This implies
- * extra work which could be counterproductive by itself. Indeed, last firmware
- * changes are just doing this: writing several descriptors with the DD bit
- * for saving PCIe bandwidth and DMA operations from the NFP.
- *
- * Mbuf allocation is done when a new packet is received. Then the descriptor
- * is automatically linked with the new mbuf and the old one is given to the
- * user. The main drawback with this design is mbuf allocation is heavier than
- * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
- * cache point of view it does not seem allocating the mbuf early on as we are
- * doing now have any benefit at all. Again, tests with this change have not
- * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
- * so looking at the implications of this type of allocation should be studied
- * deeply
- */
-
-static uint16_t
-nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-       struct nfp_net_rxq *rxq;
-       struct nfp_net_rx_desc *rxds;
-       struct nfp_net_rx_buff *rxb;
-       struct nfp_net_hw *hw;
-       struct rte_mbuf *mb;
-       struct rte_mbuf *new_mb;
-       uint16_t nb_hold;
-       uint64_t dma_addr;
-       int avail;
-
-       rxq = rx_queue;
-       if (unlikely(rxq == NULL)) {
-               /*
-                * DPDK just checks the queue is lower than max queues
-                * enabled. But the queue needs to be configured
-                */
-               RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
-               return -EINVAL;
-       }
-
-       hw = rxq->hw;
-       avail = 0;
-       nb_hold = 0;
-
-       while (avail < nb_pkts) {
-               rxb = &rxq->rxbufs[rxq->rd_p];
-               if (unlikely(rxb == NULL)) {
-                       RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
-                       break;
-               }
-
-               rxds = &rxq->rxds[rxq->rd_p];
-               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
-                       break;
-
-               /*
-                * Memory barrier to ensure that we won't do other
-                * reads before the DD bit.
-                */
-               rte_rmb();
-
-               /*
-                * We got a packet. Let's alloc a new mbuf for refilling the
-                * free descriptor ring as soon as possible
-                */
-               new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
-               if (unlikely(new_mb == NULL)) {
-                       RTE_LOG_DP(DEBUG, PMD,
-                       "RX mbuf alloc failed port_id=%u queue_id=%u\n",
-                               rxq->port_id, (unsigned int)rxq->qidx);
-                       nfp_net_mbuf_alloc_failed(rxq);
-                       break;
-               }
-
-               nb_hold++;
-
-               /*
-                * Grab the mbuf and refill the descriptor with the
-                * previously allocated mbuf
-                */
-               mb = rxb->mbuf;
-               rxb->mbuf = new_mb;
-
-               PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
-                          rxds->rxd.data_len, rxq->mbuf_size);
-
-               /* Size of this segment */
-               mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
-               /* Size of the whole packet. We just support 1 segment */
-               mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
-
-               if (unlikely((mb->data_len + hw->rx_offset) >
-                            rxq->mbuf_size)) {
-                       /*
-                        * This should not happen and the user has the
-                        * responsibility of avoiding it. But we have
-                        * to give some info about the error
-                        */
-                       RTE_LOG_DP(ERR, PMD,
-                               "mbuf overflow likely due to the RX offset.\n"
-                               "\t\tYour mbuf size should have extra space for"
-                               " RX offset=%u bytes.\n"
-                               "\t\tCurrently you just have %u bytes available"
-                               " but the received packet is %u bytes long",
-                               hw->rx_offset,
-                               rxq->mbuf_size - hw->rx_offset,
-                               mb->data_len);
-                       return -EINVAL;
-               }
-
-               /* Filling the received mbuf with packet info */
-               if (hw->rx_offset)
-                       mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
-               else
-                       mb->data_off = RTE_PKTMBUF_HEADROOM +
-                                      NFP_DESC_META_LEN(rxds);
-
-               /* No scatter mode supported */
-               mb->nb_segs = 1;
-               mb->next = NULL;
-
-               mb->port = rxq->port_id;
-
-               /* Checking the RSS flag */
-               nfp_net_set_hash(rxq, rxds, mb);
-
-               /* Checking the checksum flag */
-               nfp_net_rx_cksum(rxq, rxds, mb);
-
-               if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
-                   (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
-                       mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                       mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-               }
-
-               /* Adding the mbuf to the mbuf array passed by the app */
-               rx_pkts[avail++] = mb;
-
-               /* Now resetting and updating the descriptor */
-               rxds->vals[0] = 0;
-               rxds->vals[1] = 0;
-               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
-               rxds->fld.dd = 0;
-               rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
-               rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
-
-               rxq->rd_p++;
-               if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
-                       rxq->rd_p = 0;
-       }
-
-       if (nb_hold == 0)
-               return nb_hold;
-
-       PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received",
-                  rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
-
-       nb_hold += rxq->nb_rx_hold;
-
-       /*
-        * FL descriptors needs to be written before incrementing the
-        * FL queue WR pointer
-        */
-       rte_wmb();
-       if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
-                          rxq->port_id, (unsigned int)rxq->qidx,
-                          (unsigned)nb_hold, (unsigned)avail);
-               nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
-               nb_hold = 0;
-       }
-       rxq->nb_rx_hold = nb_hold;
-
-       return avail;
-}
-
-/*
- * nfp_net_tx_free_bufs - Check for descriptors with a complete
- * status
- * @txq: TX queue to work with
- * Returns number of descriptors freed
- */
-int
-nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
-{
-       uint32_t qcp_rd_p;
-       int todo;
-
-       PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
-                  " status", txq->qidx);
-
-       /* Work out how many packets have been sent */
-       qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
-
-       if (qcp_rd_p == txq->rd_p) {
-               PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
-                          "packets (%u, %u)", txq->qidx,
-                          qcp_rd_p, txq->rd_p);
-               return 0;
-       }
-
-       if (qcp_rd_p > txq->rd_p)
-               todo = qcp_rd_p - txq->rd_p;
-       else
-               todo = qcp_rd_p + txq->tx_count - txq->rd_p;
-
-       PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
-                  qcp_rd_p, txq->rd_p, txq->rd_p);
-
-       if (todo == 0)
-               return todo;
-
-       txq->rd_p += todo;
-       if (unlikely(txq->rd_p >= txq->tx_count))
-               txq->rd_p -= txq->tx_count;
-
-       return todo;
-}
-
-/* Leaving always free descriptors for avoiding wrapping confusion */
-static inline
-uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
-{
-       if (txq->wr_p >= txq->rd_p)
-               return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
-       else
-               return txq->rd_p - txq->wr_p - 8;
-}
-
-/*
- * nfp_net_txq_full - Check if the TX queue free descriptors
- * is below tx_free_threshold
- *
- * @txq: TX queue to check
- *
- * This function uses the host copy* of read/write pointers
- */
-static inline
-uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
-{
-       return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
-}
-
-static uint16_t
-nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-       struct nfp_net_txq *txq;
-       struct nfp_net_hw *hw;
-       struct nfp_net_tx_desc *txds, txd;
-       struct rte_mbuf *pkt;
-       uint64_t dma_addr;
-       int pkt_size, dma_size;
-       uint16_t free_descs, issued_descs;
-       struct rte_mbuf **lmbuf;
-       int i;
-
-       txq = tx_queue;
-       hw = txq->hw;
-       txds = &txq->txds[txq->wr_p];
-
-       PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
-                  txq->qidx, txq->wr_p, nb_pkts);
-
-       if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
-               nfp_net_tx_free_bufs(txq);
-
-       free_descs = (uint16_t)nfp_free_tx_desc(txq);
-       if (unlikely(free_descs == 0))
-               return 0;
-
-       pkt = *tx_pkts;
-
-       i = 0;
-       issued_descs = 0;
-       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
-                  txq->qidx, nb_pkts);
-       /* Sending packets */
-       while ((i < nb_pkts) && free_descs) {
-               /* Grabbing the mbuf linked to the current descriptor */
-               lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-               /* Warming the cache for releasing the mbuf later on */
-               RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
-
-               pkt = *(tx_pkts + i);
-
-               if (unlikely((pkt->nb_segs > 1) &&
-                            !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                       PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
-                       rte_panic("Multisegment packet unsupported\n");
-               }
-
-               /* Checking if we have enough descriptors */
-               if (unlikely(pkt->nb_segs > free_descs))
-                       goto xmit_end;
-
-               /*
-                * Checksum and VLAN flags just in the first descriptor for a
-                * multisegment packet, but TSO info needs to be in all of them.
-                */
-               txd.data_len = pkt->pkt_len;
-               nfp_net_tx_tso(txq, &txd, pkt);
-               nfp_net_tx_cksum(txq, &txd, pkt);
-
-               if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
-                   (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
-                       txd.flags |= PCIE_DESC_TX_VLAN;
-                       txd.vlan = pkt->vlan_tci;
-               }
-
-               /*
-                * mbuf data_len is the data in one segment and pkt_len data
-                * in the whole packet. When the packet is just one segment,
-                * then data_len = pkt_len
-                */
-               pkt_size = pkt->pkt_len;
-
-               while (pkt) {
-                       /* Copying TSO, VLAN and cksum info */
-                       *txds = txd;
-
-                       /* Releasing mbuf used by this descriptor previously*/
-                       if (*lmbuf)
-                               rte_pktmbuf_free_seg(*lmbuf);
-
-                       /*
-                        * Linking mbuf with descriptor for being released
-                        * next time descriptor is used
-                        */
-                       *lmbuf = pkt;
-
-                       dma_size = pkt->data_len;
-                       dma_addr = rte_mbuf_data_iova(pkt);
-                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
-                                  "%" PRIx64 "", dma_addr);
-
-                       /* Filling descriptors fields */
-                       txds->dma_len = dma_size;
-                       txds->data_len = txd.data_len;
-                       txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
-                       txds->dma_addr_lo = (dma_addr & 0xffffffff);
-                       ASSERT(free_descs > 0);
-                       free_descs--;
-
-                       txq->wr_p++;
-                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
-                               txq->wr_p = 0;
-
-                       pkt_size -= dma_size;
-
-                       /*
-                        * Making the EOP, packets with just one segment
-                        * the priority
-                        */
-                       if (likely(!pkt_size))
-                               txds->offset_eop = PCIE_DESC_TX_EOP;
-                       else
-                               txds->offset_eop = 0;
-
-                       pkt = pkt->next;
-                       /* Referencing next free TX descriptor */
-                       txds = &txq->txds[txq->wr_p];
-                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-                       issued_descs++;
-               }
-               i++;
-       }
-
-xmit_end:
-       /* Increment write pointers. Force memory write before we let HW know */
-       rte_wmb();
-       nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
-
-       return i;
-}
-
 static int
 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index a3a3ba3..9265496 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -41,6 +41,12 @@ struct nfp_net_adapter;
 #define NFP_QCP_QUEUE_STS_HI                    0x000c
 #define NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask    (0x3ffff)
 
+/* The offset of the queue controller queues in the PCIe Target */
+#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
+
+/* Maximum value which can be added to a queue with one transaction */
+#define NFP_QCP_MAX_ADD        0x7f
+
 /* Interrupt definitions */
 #define NFP_NET_IRQ_LSC_IDX             0
 
@@ -95,47 +101,11 @@ struct nfp_net_adapter;
 #include <linux/types.h>
 #include <rte_io.h>
 
-static inline uint8_t nn_readb(volatile const void *addr)
-{
-       return rte_read8(addr);
-}
-
-static inline void nn_writeb(uint8_t val, volatile void *addr)
-{
-       rte_write8(val, addr);
-}
-
-static inline uint32_t nn_readl(volatile const void *addr)
-{
-       return rte_read32(addr);
-}
-
-static inline void nn_writel(uint32_t val, volatile void *addr)
-{
-       rte_write32(val, addr);
-}
-
-static inline void nn_writew(uint16_t val, volatile void *addr)
-{
-       rte_write16(val, addr);
-}
-
-static inline uint64_t nn_readq(volatile void *addr)
-{
-       const volatile uint32_t *p = addr;
-       uint32_t low, high;
-
-       high = nn_readl((volatile const void *)(p + 1));
-       low = nn_readl((volatile const void *)p);
-
-       return low + ((uint64_t)high << 32);
-}
-
-static inline void nn_writeq(uint64_t val, volatile void *addr)
-{
-       nn_writel(val >> 32, (volatile char *)addr + 4);
-       nn_writel(val, addr);
-}
+/* nfp_qcp_ptr - Read or Write Pointer of a queue */
+enum nfp_qcp_ptr {
+       NFP_QCP_READ_PTR = 0,
+       NFP_QCP_WRITE_PTR
+};
 
 struct nfp_pf_dev {
        /* Backpointer to associated pci device */
@@ -247,6 +217,138 @@ struct nfp_net_adapter {
        struct nfp_net_hw hw;
 };
 
+static inline uint8_t nn_readb(volatile const void *addr)
+{
+       return rte_read8(addr);
+}
+
+static inline void nn_writeb(uint8_t val, volatile void *addr)
+{
+       rte_write8(val, addr);
+}
+
+static inline uint32_t nn_readl(volatile const void *addr)
+{
+       return rte_read32(addr);
+}
+
+static inline void nn_writel(uint32_t val, volatile void *addr)
+{
+       rte_write32(val, addr);
+}
+
+static inline void nn_writew(uint16_t val, volatile void *addr)
+{
+       rte_write16(val, addr);
+}
+
+static inline uint64_t nn_readq(volatile void *addr)
+{
+       const volatile uint32_t *p = addr;
+       uint32_t low, high;
+
+       high = nn_readl((volatile const void *)(p + 1));
+       low = nn_readl((volatile const void *)p);
+
+       return low + ((uint64_t)high << 32);
+}
+
+static inline void nn_writeq(uint64_t val, volatile void *addr)
+{
+       nn_writel(val >> 32, (volatile char *)addr + 4);
+       nn_writel(val, addr);
+}
+
+/*
+ * Functions to read/write from/to Config BAR
+ * Performs any endian conversion necessary.
+ */
+static inline uint8_t
+nn_cfg_readb(struct nfp_net_hw *hw, int off)
+{
+       return nn_readb(hw->ctrl_bar + off);
+}
+
+static inline void
+nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
+{
+       nn_writeb(val, hw->ctrl_bar + off);
+}
+
+static inline uint32_t
+nn_cfg_readl(struct nfp_net_hw *hw, int off)
+{
+       return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
+}
+
+static inline void
+nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
+{
+       nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
+}
+
+static inline uint64_t
+nn_cfg_readq(struct nfp_net_hw *hw, int off)
+{
+       return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
+}
+
+static inline void
+nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
+{
+       nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
+}
+
+/*
+ * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
+ * @q: Base address for queue structure
+ * @ptr: Add to the Read or Write pointer
+ * @val: Value to add to the queue pointer
+ *
+ * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
+ */
+static inline void
+nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
+{
+       uint32_t off;
+
+       if (ptr == NFP_QCP_READ_PTR)
+               off = NFP_QCP_QUEUE_ADD_RPTR;
+       else
+               off = NFP_QCP_QUEUE_ADD_WPTR;
+
+       while (val > NFP_QCP_MAX_ADD) {
+               nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
+               val -= NFP_QCP_MAX_ADD;
+       }
+
+       nn_writel(rte_cpu_to_le_32(val), q + off);
+}
+
+/*
+ * nfp_qcp_read - Read the current Read/Write pointer value for a queue
+ * @q:  Base address for queue structure
+ * @ptr: Read or Write pointer
+ */
+static inline uint32_t
+nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
+{
+       uint32_t off;
+       uint32_t val;
+
+       if (ptr == NFP_QCP_READ_PTR)
+               off = NFP_QCP_QUEUE_STS_LO;
+       else
+               off = NFP_QCP_QUEUE_STS_HI;
+
+       val = rte_cpu_to_le_32(nn_readl(q + off));
+
+       if (ptr == NFP_QCP_READ_PTR)
+               return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
+       else
+               return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
+}
+
 #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\
        (&((struct nfp_net_adapter *)adapter)->hw)
 
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
new file mode 100644
index 0000000..9ee9e5c
--- /dev/null
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -0,0 +1,1002 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2021 Netronome Systems, Inc.
+ * All rights reserved.
+ *
+ * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
+ */
+
+/*
+ * vim:shiftwidth=8:noexpandtab
+ *
+ * @file dpdk/pmd/nfp_rxtx.c
+ *
+ * Netronome vNIC DPDK Poll-Mode Driver: Rx/Tx functions
+ */
+
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+
+#include "nfp_net_pmd.h"
+#include "nfp_rxtx.h"
+#include "nfp_net_logs.h"
+#include "nfp_net_ctrl.h"
+
+/* Prototypes */
+static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
+static inline void nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq);
+static inline void nfp_net_set_hash(struct nfp_net_rxq *rxq,
+                                   struct nfp_net_rx_desc *rxd,
+                                   struct rte_mbuf *mbuf);
+static inline void nfp_net_rx_cksum(struct nfp_net_rxq *rxq,
+                                   struct nfp_net_rx_desc *rxd,
+                                   struct rte_mbuf *mb);
+static void nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq);
+static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
+static void nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq);
+static inline uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq);
+static inline uint32_t nfp_net_txq_full(struct nfp_net_txq *txq);
+static inline void nfp_net_tx_tso(struct nfp_net_txq *txq,
+                                 struct nfp_net_tx_desc *txd,
+                                 struct rte_mbuf *mb);
+static inline void nfp_net_tx_cksum(struct nfp_net_txq *txq,
+                                   struct nfp_net_tx_desc *txd,
+                                   struct rte_mbuf *mb);
+
+static int
+nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
+{
+       struct nfp_net_rx_buff *rxe = rxq->rxbufs;
+       uint64_t dma_addr;
+       unsigned int i;
+
+       PMD_RX_LOG(DEBUG, "Fill Rx Freelist for %u descriptors",
+                  rxq->rx_count);
+
+       for (i = 0; i < rxq->rx_count; i++) {
+               struct nfp_net_rx_desc *rxd;
+               struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
+
+               if (mbuf == NULL) {
+                       PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+                               (unsigned int)rxq->qidx);
+                       return -ENOMEM;
+               }
+
+               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
+
+               rxd = &rxq->rxds[i];
+               rxd->fld.dd = 0;
+               rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+               rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
+               rxe[i].mbuf = mbuf;
+               PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
+       }
+
+       /* Make sure all writes are flushed before telling the hardware */
+       rte_wmb();
+
+       /* Not advertising the whole ring as the firmware gets confused if so */
+       PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
+                  rxq->rx_count - 1);
+
+       nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
+
+       return 0;
+}
+
+int
+nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
+                       return -1;
+       }
+       return 0;
+}
+
+uint32_t
+nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+       struct nfp_net_rxq *rxq;
+       struct nfp_net_rx_desc *rxds;
+       uint32_t idx;
+       uint32_t count;
+
+       rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
+
+       idx = rxq->rd_p;
+
+       count = 0;
+
+       /*
+        * Other PMDs are just checking the DD bit in intervals of 4
+        * descriptors and counting all four if the first has the DD
+        * bit on. Of course, this is not accurate but can be good for
+        * performance. But ideally that should be done in descriptors
+        * chunks belonging to the same cache line
+        */
+
+       while (count < rxq->rx_count) {
+               rxds = &rxq->rxds[idx];
+               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+                       break;
+
+               count++;
+               idx++;
+
+               /* Wrapping? */
+               if ((idx) == rxq->rx_count)
+                       idx = 0;
+       }
+
+       return count;
+}
+
+static inline void
+nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
+{
+       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+}
+
+/*
+ * nfp_net_set_hash - Set mbuf hash data
+ *
+ * The RSS hash and hash-type are pre-pended to the packet data.
+ * Extract and decode it and set the mbuf fields.
+ */
+static inline void
+nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
+                struct rte_mbuf *mbuf)
+{
+       struct nfp_net_hw *hw = rxq->hw;
+       uint8_t *meta_offset;
+       uint32_t meta_info;
+       uint32_t hash = 0;
+       uint32_t hash_type = 0;
+
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
+               return;
+
+       /* this is true for new firmwares */
+       if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
+           (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
+            NFP_DESC_META_LEN(rxd))) {
+               /*
+                * new metadata api:
+                * <----  32 bit  ----->
+                * m    field type word
+                * e     data field #2
+                * t     data field #1
+                * a     data field #0
+                * ====================
+                *    packet data
+                *
+                * Field type word contains up to 8 4bit field types
+                * A 4bit field type refers to a data field word
+                * A data field word can have several 4bit field types
+                */
+               meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
+               meta_offset -= NFP_DESC_META_LEN(rxd);
+               meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+               meta_offset += 4;
+               /* NFP PMD just supports metadata for hashing */
+               switch (meta_info & NFP_NET_META_FIELD_MASK) {
+               case NFP_NET_META_HASH:
+                       /* next field type is about the hash type */
+                       meta_info >>= NFP_NET_META_FIELD_SIZE;
+                       /* hash value is in the data field */
+                       hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
+                       hash_type = meta_info & NFP_NET_META_FIELD_MASK;
+                       break;
+               default:
+                       /* Unsupported metadata can be a performance issue */
+                       return;
+               }
+       } else {
+               if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+                       return;
+
+               hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+               hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
+       }
+
+       mbuf->hash.rss = hash;
+       mbuf->ol_flags |= PKT_RX_RSS_HASH;
+
+       switch (hash_type) {
+       case NFP_NET_RSS_IPV4:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
+               break;
+       case NFP_NET_RSS_IPV6:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
+               break;
+       case NFP_NET_RSS_IPV6_EX:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+               break;
+       case NFP_NET_RSS_IPV4_TCP:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+               break;
+       case NFP_NET_RSS_IPV6_TCP:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+               break;
+       case NFP_NET_RSS_IPV4_UDP:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+               break;
+       case NFP_NET_RSS_IPV6_UDP:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+               break;
+       default:
+               mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
+       }
+}
+
+/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
+static inline void
+nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
+                struct rte_mbuf *mb)
+{
+       struct nfp_net_hw *hw = rxq->hw;
+
+       if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
+               return;
+
+       /* If IPv4 and IP checksum error, fail */
+       if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+           !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
+               mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+       else
+               mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+       /* If neither UDP nor TCP return */
+       if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
+           !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
+               return;
+
+       if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+               mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+       else
+               mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+}
+
+/*
+ * RX path design:
+ *
+ * There are some decisions to take:
+ * 1) How to check DD RX descriptors bit
+ * 2) How and when to allocate new mbufs
+ *
+ * Current implementation checks just one single DD bit each loop. As each
+ * descriptor is 8 bytes, it is likely a good idea to check descriptors in
+ * a single cache line instead. Tests with this change have not shown any
+ * performance improvement but it requires further investigation. For example,
+ * depending on which descriptor is next, the number of descriptors could be
+ * less than 8 for just checking those in the same cache line. This implies
+ * extra work which could be counterproductive by itself. Indeed, last firmware
+ * changes are just doing this: writing several descriptors with the DD bit
+ * for saving PCIe bandwidth and DMA operations from the NFP.
+ *
+ * Mbuf allocation is done when a new packet is received. Then the descriptor
+ * is automatically linked with the new mbuf and the old one is given to the
+ * user. The main drawback with this design is mbuf allocation is heavier than
+ * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
+ * cache point of view it does not seem allocating the mbuf early on as we are
+ * doing now have any benefit at all. Again, tests with this change have not
+ * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
+ * so looking at the implications of this type of allocation should be studied
+ * deeply
+ */
+
+uint16_t
+nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct nfp_net_rxq *rxq;
+       struct nfp_net_rx_desc *rxds;
+       struct nfp_net_rx_buff *rxb;
+       struct nfp_net_hw *hw;
+       struct rte_mbuf *mb;
+       struct rte_mbuf *new_mb;
+       uint16_t nb_hold;
+       uint64_t dma_addr;
+       int avail;
+
+       rxq = rx_queue;
+       if (unlikely(rxq == NULL)) {
+               /*
+                * DPDK only checks that the queue index is lower than the
+                * number of enabled queues, but the queue still needs to
+                * have been configured
+                */
+               RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
+               return 0;
+       }
+
+       hw = rxq->hw;
+       avail = 0;
+       nb_hold = 0;
+
+       while (avail < nb_pkts) {
+               rxb = &rxq->rxbufs[rxq->rd_p];
+               if (unlikely(rxb == NULL)) {
+                       RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
+                       break;
+               }
+
+               rxds = &rxq->rxds[rxq->rd_p];
+               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+                       break;
+
+               /*
+                * Memory barrier to ensure that we won't do other
+                * reads before the DD bit.
+                */
+               rte_rmb();
+
+               /*
+                * We got a packet. Let's alloc a new mbuf for refilling the
+                * free descriptor ring as soon as possible
+                */
+               new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
+               if (unlikely(new_mb == NULL)) {
+                       RTE_LOG_DP(DEBUG, PMD,
+                       "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+                               rxq->port_id, (unsigned int)rxq->qidx);
+                       nfp_net_mbuf_alloc_failed(rxq);
+                       break;
+               }
+
+               nb_hold++;
+
+               /*
+                * Grab the mbuf and refill the descriptor with the
+                * previously allocated mbuf
+                */
+               mb = rxb->mbuf;
+               rxb->mbuf = new_mb;
+
+               PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
+                          rxds->rxd.data_len, rxq->mbuf_size);
+
+               /* Size of this segment */
+               mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+               /* Size of the whole packet. We just support 1 segment */
+               mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+
+               if (unlikely((mb->data_len + hw->rx_offset) >
+                            rxq->mbuf_size)) {
+                       /*
+                        * This should not happen and the user has the
+                        * responsibility of avoiding it. But we have
+                        * to give some info about the error
+                        */
+                       RTE_LOG_DP(ERR, PMD,
+                               "mbuf overflow likely due to the RX offset.\n"
+                               "\t\tYour mbuf size should have extra space for"
+                               " RX offset=%u bytes.\n"
+                               "\t\tCurrently you just have %u bytes available"
+                               " but the received packet is %u bytes long",
+                               hw->rx_offset,
+                               rxq->mbuf_size - hw->rx_offset,
+                               mb->data_len);
+                       rte_pktmbuf_free(mb);
+                       break;
+               }
+
+               /* Filling the received mbuf with packet info */
+               if (hw->rx_offset)
+                       mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
+               else
+                       mb->data_off = RTE_PKTMBUF_HEADROOM +
+                                      NFP_DESC_META_LEN(rxds);
+
+               /* No scatter mode supported */
+               mb->nb_segs = 1;
+               mb->next = NULL;
+
+               mb->port = rxq->port_id;
+
+               /* Checking the RSS flag */
+               nfp_net_set_hash(rxq, rxds, mb);
+
+               /* Checking the checksum flag */
+               nfp_net_rx_cksum(rxq, rxds, mb);
+
+               if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
+                   (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
+                       mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
+                       mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               }
+
+               /* Adding the mbuf to the mbuf array passed by the app */
+               rx_pkts[avail++] = mb;
+
+               /* Now resetting and updating the descriptor */
+               rxds->vals[0] = 0;
+               rxds->vals[1] = 0;
+               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
+               rxds->fld.dd = 0;
+               rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
+               rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+
+               rxq->rd_p++;
+               if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+                       rxq->rd_p = 0;
+       }
+
+       if (nb_hold == 0)
+               return nb_hold;
+
+       PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received",
+                  rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
+
+       nb_hold += rxq->nb_rx_hold;
+
+       /*
+        * FL descriptors need to be written before incrementing the
+        * FL queue WR pointer
+        */
+       rte_wmb();
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
+                          rxq->port_id, (unsigned int)rxq->qidx,
+                          (unsigned int)nb_hold, (unsigned int)avail);
+               nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+
+       return avail;
+}
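Since nfp_net_recv_pkts() is installed as the PMD's rx_pkt_burst callback, applications reach it through rte_eth_rx_burst(); a minimal polling sketch, where process_packet() and the port/queue identifiers are purely illustrative.

    struct rte_mbuf *pkts[32];
    uint16_t i, n;

    n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
    for (i = 0; i < n; i++) {
            process_packet(pkts[i]);      /* hypothetical */
            rte_pktmbuf_free(pkts[i]);
    }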
+
+static void
+nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
+{
+       unsigned int i;
+
+       if (rxq->rxbufs == NULL)
+               return;
+
+       for (i = 0; i < rxq->rx_count; i++) {
+               if (rxq->rxbufs[i].mbuf) {
+                       rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
+                       rxq->rxbufs[i].mbuf = NULL;
+               }
+       }
+}
+
+void
+nfp_net_rx_queue_release(void *rx_queue)
+{
+       struct nfp_net_rxq *rxq = rx_queue;
+
+       if (rxq) {
+               nfp_net_rx_queue_release_mbufs(rxq);
+               rte_free(rxq->rxbufs);
+               rte_free(rxq);
+       }
+}
+
+void
+nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
+{
+       nfp_net_rx_queue_release_mbufs(rxq);
+       rxq->rd_p = 0;
+       rxq->nb_rx_hold = 0;
+}
+
+int
+nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
+                      uint16_t queue_idx, uint16_t nb_desc,
+                      unsigned int socket_id,
+                      const struct rte_eth_rxconf *rx_conf,
+                      struct rte_mempool *mp)
+{
+       const struct rte_memzone *tz;
+       struct nfp_net_rxq *rxq;
+       struct nfp_net_hw *hw;
+       uint32_t rx_desc_sz;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Validating number of descriptors */
+       rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
+       if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+           nb_desc > NFP_NET_MAX_RX_DESC ||
+           nb_desc < NFP_NET_MIN_RX_DESC) {
+               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+               return -EINVAL;
+       }
+
+       /*
+        * Free memory prior to re-allocation if needed. This is the case after
+        * calling nfp_net_stop
+        */
+       if (dev->data->rx_queues[queue_idx]) {
+               nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocating rx queue data structure */
+       rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxq == NULL)
+               return -ENOMEM;
+
+       /* Hw queues mapping based on firmware configuration */
+       rxq->qidx = queue_idx;
+       rxq->fl_qcidx = queue_idx * hw->stride_rx;
+       rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
+       rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
+       rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);
+
+       /*
+        * Tracking mbuf size for detecting a potential mbuf overflow due to
+        * RX offset
+        */
+       rxq->mem_pool = mp;
+       rxq->mbuf_size = rxq->mem_pool->elt_size;
+       rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+       hw->flbufsz = rxq->mbuf_size;
+
+       rxq->rx_count = nb_desc;
+       rxq->port_id = dev->data->port_id;
+       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+       rxq->drop_en = rx_conf->rx_drop_en;
+
+       /*
+        * Allocate RX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+                                  sizeof(struct nfp_net_rx_desc) *
+                                  NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+                                  socket_id);
+
+       if (tz == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating rx dma");
+               nfp_net_rx_queue_release(rxq);
+               return -ENOMEM;
+       }
+
+       /* Saving physical and virtual addresses for the RX ring */
+       rxq->dma = (uint64_t)tz->iova;
+       rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;
+
+       /* mbuf pointers array for referencing mbufs linked to RX descriptors */
+       rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
+                                        sizeof(*rxq->rxbufs) * nb_desc,
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+       if (rxq->rxbufs == NULL) {
+               nfp_net_rx_queue_release(rxq);
+               return -ENOMEM;
+       }
+
+       PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+                  rxq->rxbufs, rxq->rxds, (unsigned long)rxq->dma);
+
+       nfp_net_reset_rx_queue(rxq);
+
+       dev->data->rx_queues[queue_idx] = rxq;
+       rxq->hw = hw;
+
+       /*
+        * Telling the HW about the physical address of the RX ring and number
+        * of descriptors in log2 format
+        */
+       nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
+       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+       return 0;
+}
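An application reaches this setup path through rte_eth_rx_queue_setup(); a minimal sketch with illustrative sizes, assuming 1024 descriptors satisfies the min/max and alignment checks performed above.

    struct rte_mempool *mp;
    int ret;

    mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
                                 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    if (mp == NULL)
            rte_exit(EXIT_FAILURE, "cannot create RX mempool\n");

    /* NULL rx_conf selects the driver defaults */
    ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp);
    if (ret < 0)
            rte_exit(EXIT_FAILURE, "RX queue setup failed\n");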
+
+/*
+ * nfp_net_tx_free_bufs - Check for descriptors with a complete
+ * status
+ * @txq: TX queue to work with
+ * Returns number of descriptors freed
+ */
+static int
+nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
+{
+       uint32_t qcp_rd_p;
+       int todo;
+
+       PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
+                  " status", txq->qidx);
+
+       /* Work out how many packets have been sent */
+       qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
+
+       if (qcp_rd_p == txq->rd_p) {
+               PMD_TX_LOG(DEBUG, "queue %u: It seems the hardware is not "
+                          "sending packets (%u, %u)", txq->qidx,
+                          qcp_rd_p, txq->rd_p);
+               return 0;
+       }
+
+       if (qcp_rd_p > txq->rd_p)
+               todo = qcp_rd_p - txq->rd_p;
+       else
+               todo = qcp_rd_p + txq->tx_count - txq->rd_p;
+
+       PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, todo: %d",
+                  qcp_rd_p, txq->rd_p, todo);
+
+       if (todo == 0)
+               return todo;
+
+       txq->rd_p += todo;
+       if (unlikely(txq->rd_p >= txq->tx_count))
+               txq->rd_p -= txq->tx_count;
+
+       return todo;
+}
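A short worked example of the wrap-around branch above: with tx_count = 256, a host read pointer txq->rd_p = 250 and a hardware read pointer qcp_rd_p = 10, the hardware pointer has wrapped, so todo = 10 + 256 - 250 = 16 descriptors can be released.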
+
+static void
+nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
+{
+       unsigned int i;
+
+       if (txq->txbufs == NULL)
+               return;
+
+       for (i = 0; i < txq->tx_count; i++) {
+               if (txq->txbufs[i].mbuf) {
+                       rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
+                       txq->txbufs[i].mbuf = NULL;
+               }
+       }
+}
+
+void
+nfp_net_tx_queue_release(void *tx_queue)
+{
+       struct nfp_net_txq *txq = tx_queue;
+
+       if (txq) {
+               nfp_net_tx_queue_release_mbufs(txq);
+               rte_free(txq->txbufs);
+               rte_free(txq);
+       }
+}
+
+void
+nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
+{
+       nfp_net_tx_queue_release_mbufs(txq);
+       txq->wr_p = 0;
+       txq->rd_p = 0;
+}
+
+int
+nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                      uint16_t nb_desc, unsigned int socket_id,
+                      const struct rte_eth_txconf *tx_conf)
+{
+       const struct rte_memzone *tz;
+       struct nfp_net_txq *txq;
+       uint16_t tx_free_thresh;
+       struct nfp_net_hw *hw;
+       uint32_t tx_desc_sz;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Validating number of descriptors */
+       tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
+       if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+           nb_desc > NFP_NET_MAX_TX_DESC ||
+           nb_desc < NFP_NET_MIN_TX_DESC) {
+               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+               return -EINVAL;
+       }
+
+       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+                                   tx_conf->tx_free_thresh :
+                                   DEFAULT_TX_FREE_THRESH);
+
+       if (tx_free_thresh > nb_desc) {
+               PMD_DRV_LOG(ERR,
+                       "tx_free_thresh must be less than the number of TX "
+                       "descriptors. (tx_free_thresh=%u port=%d "
+                       "queue=%d)", (unsigned int)tx_free_thresh,
+                       dev->data->port_id, (int)queue_idx);
+               return -EINVAL;
+       }
+
+       /*
+        * Free memory prior to re-allocation if needed. This is the case after
+        * calling nfp_net_stop
+        */
+       if (dev->data->tx_queues[queue_idx]) {
+               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+                          queue_idx);
+               nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocating tx queue data structure */
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               return -ENOMEM;
+       }
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                                  sizeof(struct nfp_net_tx_desc) *
+                                  NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+                                  socket_id);
+       if (tz == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               nfp_net_tx_queue_release(txq);
+               return -ENOMEM;
+       }
+
+       txq->tx_count = nb_desc;
+       txq->tx_free_thresh = tx_free_thresh;
+       txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
+       txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
+       txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
+
+       /* queue mapping based on firmware configuration */
+       txq->qidx = queue_idx;
+       txq->tx_qcidx = queue_idx * hw->stride_tx;
+       txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
+
+       txq->port_id = dev->data->port_id;
+
+       /* Saving physical and virtual addresses for the TX ring */
+       txq->dma = (uint64_t)tz->iova;
+       txq->txds = (struct nfp_net_tx_desc *)tz->addr;
+
+       /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+       txq->txbufs = rte_zmalloc_socket("txq->txbufs",
+                                        sizeof(*txq->txbufs) * nb_desc,
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq->txbufs == NULL) {
+               nfp_net_tx_queue_release(txq);
+               return -ENOMEM;
+       }
+       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+                  txq->txbufs, txq->txds, (unsigned long)txq->dma);
+
+       nfp_net_reset_tx_queue(txq);
+
+       dev->data->tx_queues[queue_idx] = txq;
+       txq->hw = hw;
+
+       /*
+        * Telling the HW about the physical address of the TX ring and number
+        * of descriptors in log2 format
+        */
+       nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
+
+       return 0;
+}
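Mirroring the RX case, the TX path is reached through rte_eth_tx_queue_setup(); a minimal sketch with illustrative values, where a NULL txconf lets ethdev apply the driver-reported defaults before the tx_free_thresh handling above runs.

    int ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
    if (ret < 0)
            rte_exit(EXIT_FAILURE, "TX queue setup failed\n");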
+
+/*
+ * Always keep some descriptors free so a full ring cannot be mistaken
+ * for an empty one
+ */
+static inline uint32_t
+nfp_free_tx_desc(struct nfp_net_txq *txq)
+{
+       if (txq->wr_p >= txq->rd_p)
+               return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+       else
+               return txq->rd_p - txq->wr_p - 8;
+}
+
+/*
+ * nfp_net_txq_full - Check if the number of free TX descriptors
+ * is below tx_free_thresh
+ *
+ * @txq: TX queue to check
+ *
+ * This function uses the host copy of the read/write pointers
+ */
+static inline uint32_t
+nfp_net_txq_full(struct nfp_net_txq *txq)
+{
+       return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
+}
+
+/* nfp_net_tx_tso - Set TX descriptor for TSO */
+static inline void
+nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+              struct rte_mbuf *mb)
+{
+       uint64_t ol_flags;
+       struct nfp_net_hw *hw = txq->hw;
+
+       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
+               goto clean_txd;
+
+       ol_flags = mb->ol_flags;
+
+       if (!(ol_flags & PKT_TX_TCP_SEG))
+               goto clean_txd;
+
+       txd->l3_offset = mb->l2_len;
+       txd->l4_offset = mb->l2_len + mb->l3_len;
+       txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+       txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
+       txd->flags = PCIE_DESC_TX_LSO;
+       return;
+
+clean_txd:
+       txd->flags = 0;
+       txd->l3_offset = 0;
+       txd->l4_offset = 0;
+       txd->lso_hdrlen = 0;
+       txd->mss = 0;
+}
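For reference, a hedged sketch of the mbuf fields an application would fill so that the PKT_TX_TCP_SEG branch above is taken; m is the mbuf about to be transmitted, the header lengths and MSS are illustrative, and the port must advertise LSO support.

    m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 |
                   PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    m->l2_len = sizeof(struct rte_ether_hdr);   /* used for txd->l3_offset */
    m->l3_len = sizeof(struct rte_ipv4_hdr);    /* l2+l3 gives txd->l4_offset */
    m->l4_len = sizeof(struct rte_tcp_hdr);     /* l2+l3+l4 gives txd->lso_hdrlen */
    m->tso_segsz = 1448;                        /* MSS, copied into txd->mss */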
+
+/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
+static inline void
+nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+                struct rte_mbuf *mb)
+{
+       uint64_t ol_flags;
+       struct nfp_net_hw *hw = txq->hw;
+
+       if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
+               return;
+
+       ol_flags = mb->ol_flags;
+
+       /* Only IPv4 has a header checksum; IPv6 needs nothing here */
+       if (ol_flags & PKT_TX_IP_CKSUM)
+               txd->flags |= PCIE_DESC_TX_IP4_CSUM;
+
+       switch (ol_flags & PKT_TX_L4_MASK) {
+       case PKT_TX_UDP_CKSUM:
+               txd->flags |= PCIE_DESC_TX_UDP_CSUM;
+               break;
+       case PKT_TX_TCP_CKSUM:
+               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
+               break;
+       }
+
+       if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+               txd->flags |= PCIE_DESC_TX_CSUM;
+}
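And the plain (non-TSO) checksum-offload counterpart, again as an application-side sketch with illustrative headers.

    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);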
+
+uint16_t
+nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct nfp_net_txq *txq;
+       struct nfp_net_hw *hw;
+       struct nfp_net_tx_desc *txds, txd;
+       struct rte_mbuf *pkt;
+       uint64_t dma_addr;
+       int pkt_size, dma_size;
+       uint16_t free_descs, issued_descs;
+       struct rte_mbuf **lmbuf;
+       int i;
+
+       txq = tx_queue;
+       hw = txq->hw;
+       txds = &txq->txds[txq->wr_p];
+
+       PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
+                  txq->qidx, txq->wr_p, nb_pkts);
+
+       if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+               nfp_net_tx_free_bufs(txq);
+
+       free_descs = (uint16_t)nfp_free_tx_desc(txq);
+       if (unlikely(free_descs == 0))
+               return 0;
+
+       pkt = *tx_pkts;
+
+       i = 0;
+       issued_descs = 0;
+       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
+                  txq->qidx, nb_pkts);
+       /* Sending packets */
+       while ((i < nb_pkts) && free_descs) {
+               /* Grabbing the mbuf linked to the current descriptor */
+               lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+               /* Warming the cache for releasing the mbuf later on */
+               RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+
+               pkt = *(tx_pkts + i);
+
+               if (unlikely(pkt->nb_segs > 1 &&
+                            !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
+                       PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
+                       rte_panic("Multisegment packet unsupported\n");
+               }
+
+               /* Checking if we have enough descriptors */
+               if (unlikely(pkt->nb_segs > free_descs))
+                       goto xmit_end;
+
+               /*
+                * Checksum and VLAN flags go only in the first descriptor
+                * of a multisegment packet, but TSO info needs to be in
+                * all of them
+                */
+               txd.data_len = pkt->pkt_len;
+               nfp_net_tx_tso(txq, &txd, pkt);
+               nfp_net_tx_cksum(txq, &txd, pkt);
+
+               if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
+                   (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
+                       txd.flags |= PCIE_DESC_TX_VLAN;
+                       txd.vlan = pkt->vlan_tci;
+               }
+
+               /*
+                * mbuf data_len is the length of data in one segment and
+                * pkt_len the length of the whole packet. For a
+                * single-segment packet, data_len equals pkt_len
+                */
+               pkt_size = pkt->pkt_len;
+
+               while (pkt) {
+                       /* Copying TSO, VLAN and cksum info */
+                       *txds = txd;
+
+                       /* Releasing mbuf used by this descriptor previously*/
+                       if (*lmbuf)
+                               rte_pktmbuf_free_seg(*lmbuf);
+
+                       /*
+                        * Linking mbuf with descriptor for being released
+                        * next time descriptor is used
+                        */
+                       *lmbuf = pkt;
+
+                       dma_size = pkt->data_len;
+                       dma_addr = rte_mbuf_data_iova(pkt);
+                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
+                                  "%" PRIx64 "", dma_addr);
+
+                       /* Filling descriptors fields */
+                       txds->dma_len = dma_size;
+                       txds->data_len = txd.data_len;
+                       txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+                       txds->dma_addr_lo = (dma_addr & 0xffffffff);
+                       ASSERT(free_descs > 0);
+                       free_descs--;
+
+                       txq->wr_p++;
+                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+                               txq->wr_p = 0;
+
+                       pkt_size -= dma_size;
+
+                       /*
+                        * Set EOP on the descriptor carrying the last
+                        * segment; single-segment packets, the common case,
+                        * hit this on their only descriptor
+                        */
+                       if (likely(!pkt_size))
+                               txds->offset_eop = PCIE_DESC_TX_EOP;
+                       else
+                               txds->offset_eop = 0;
+
+                       pkt = pkt->next;
+                       /* Referencing next free TX descriptor */
+                       txds = &txq->txds[txq->wr_p];
+                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+                       issued_descs++;
+               }
+               i++;
+       }
+
+xmit_end:
+       /* Ensure descriptor writes reach memory before advancing the HW write pointer */
+       rte_wmb();
+       nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
+
+       return i;
+}
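nfp_net_xmit_pkts() is the PMD's tx_pkt_burst callback, so applications call it through rte_eth_tx_burst(); a minimal sketch of the usual retry pattern, where pkts and n are the caller's mbuf array and count.

    uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);

    /* Packets not accepted by the queue remain owned by the caller */
    while (sent < n)
            sent += rte_eth_tx_burst(port_id, queue_id,
                                     pkts + sent, n - sent);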
index 41a3a4b..d2d0f3f 100644 (file)
 #include <linux/types.h>
 #include <rte_io.h>
 
+#define NFP_DESC_META_LEN(d) ((d)->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
+
+#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
+#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)
+
+#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
+       ((uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM))
+
 /*
  * The maximum number of descriptors is limited by design as
  * DPDK uses uint16_t variables for these values
@@ -266,6 +274,25 @@ struct nfp_net_rxq {
        int rx_qcidx;
 } __rte_aligned(64);
 
+int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);
+uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
+                                      uint16_t queue_idx);
+uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts);
+void nfp_net_rx_queue_release(void *rxq);
+void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq);
+int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                                 uint16_t nb_desc, unsigned int socket_id,
+                                 const struct rte_eth_rxconf *rx_conf,
+                                 struct rte_mempool *mp);
+void nfp_net_tx_queue_release(void *txq);
+void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
+int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                                 uint16_t nb_desc, unsigned int socket_id,
+                                 const struct rte_eth_txconf *tx_conf);
+uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                 uint16_t nb_pkts);
+
 #endif /* _NFP_RXTX_H_ */
 /*
  * Local variables: