net/dpaa: support Tx and Rx queue setup
author Shreyansh Jain <shreyansh.jain@nxp.com>
Thu, 28 Sep 2017 12:29:44 +0000 (17:59 +0530)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 6 Oct 2017 00:49:50 +0000 (02:49 +0200)
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain@nxp.com>
drivers/net/dpaa/Makefile
drivers/net/dpaa/dpaa_ethdev.c
drivers/net/dpaa/dpaa_rxtx.c [new file with mode: 0644]
drivers/net/dpaa/dpaa_rxtx.h [new file with mode: 0644]
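For context, the dev_ops added in this patch sit behind the standard rte_ethdev entry points. Below is a minimal sketch of the application-side sequence that exercises them; the port id, descriptor counts, and single queue per direction are illustrative assumptions, not part of the patch.

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    static int setup_dpaa_port(uint16_t port_id, struct rte_mempool *mb_pool)
    {
            static const struct rte_eth_conf port_conf; /* all defaults */
            int ret;

            ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
            if (ret < 0)
                    return ret;

            /* dispatched to dpaa_eth_rx_queue_setup(); the first call binds
             * mb_pool to the FMAN buffer pool of this interface
             */
            ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                         NULL, mb_pool);
            if (ret < 0)
                    return ret;

            /* dispatched to dpaa_eth_tx_queue_setup() */
            ret = rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(),
                                         NULL);
            if (ret < 0)
                    return ret;

            /* dpaa_eth_dev_start() installs dpaa_eth_queue_tx, enables Rx */
            return rte_eth_dev_start(port_id);
    }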

diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index bb305ca..c77384c 100644
@@ -38,10 +38,12 @@ LIB = librte_pmd_dpaa.a
 
 CFLAGS := -I$(SRCDIR) $(CFLAGS)
 CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -Wno-pointer-arith
 CFLAGS += -I$(RTE_SDK_DPAA)/
 CFLAGS += -I$(RTE_SDK_DPAA)/include
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
 CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal/include
 
@@ -51,7 +53,9 @@ LIBABIVER := 1
 
 # Interfaces with DPDK
 SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
 
 LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 4543dfc..4996daa 100644
 
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
 
 #include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -78,20 +85,104 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
        PMD_INIT_FUNC_TRACE();
 
        /* Change tx callback to the real one */
-       dev->tx_pkt_burst = NULL;
+       dev->tx_pkt_burst = dpaa_eth_queue_tx;
+       fman_if_enable_rx(dpaa_intf->fif);
 
        return 0;
 }
 
 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
-       dev->tx_pkt_burst = NULL;
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       fman_if_disable_rx(dpaa_intf->fif);
+       dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       dpaa_eth_dev_stop(dev);
+}
+
+static
+int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                           uint16_t nb_desc __rte_unused,
+                           unsigned int socket_id __rte_unused,
+                           const struct rte_eth_rxconf *rx_conf __rte_unused,
+                           struct rte_mempool *mp)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+
+       if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+               struct fman_if_ic_params icp;
+               uint32_t fd_offset;
+               uint32_t bp_size;
+
+               if (!mp->pool_data) {
+                       DPAA_PMD_ERR("Not an offloaded buffer pool!");
+                       return -1;
+               }
+               dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+               memset(&icp, 0, sizeof(icp));
+               /* set the IC parameters to their default values */
+               icp.iciof = DEFAULT_ICIOF;
+               icp.iceof = DEFAULT_RX_ICEOF;
+               icp.icsz = DEFAULT_ICSZ;
+               fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
+               fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+               fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+               /* Buffer pool size should be equal to the dataroom size */
+               bp_size = rte_pktmbuf_data_room_size(mp);
+               fman_if_set_bp(dpaa_intf->fif, mp->size,
+                              dpaa_intf->bp_info->bpid, bp_size);
+               dpaa_intf->valid = 1;
+               DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
+                             dpaa_intf->name, fd_offset,
+                             fman_if_get_fdoff(dpaa_intf->fif));
+       }
+       dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+
+       return 0;
+}
+
+static
+void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+       PMD_INIT_FUNC_TRACE();
+}
+
+static
+int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                           uint16_t nb_desc __rte_unused,
+                           unsigned int socket_id __rte_unused,
+                           const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+       dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+       return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
 {
        PMD_INIT_FUNC_TRACE();
 }
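Note the bp_info check in dpaa_eth_rx_queue_setup() above: mempools whose pool_data is unset, i.e. pools not managed by the DPAA mempool driver, are rejected. A hedged sketch of creating a pool that passes the check, assuming the target build points CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS at the dpaa handler; pool name and sizes are illustrative.

    #include <rte_mbuf.h>
    #include <rte_lcore.h>

    /* With the platform default mempool ops set to the dpaa handler, the
     * pool below is hardware-backed and pool_data is populated, so the Rx
     * queue setup above accepts it.
     */
    static struct rte_mempool *make_dpaa_pool(void)
    {
            return rte_pktmbuf_pool_create("dpaa_rx_pool", 8192, 256, 0,
                                           RTE_MBUF_DEFAULT_BUF_SIZE,
                                           rte_socket_id());
    }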
@@ -101,15 +192,102 @@ static struct eth_dev_ops dpaa_devops = {
        .dev_start                = dpaa_eth_dev_start,
        .dev_stop                 = dpaa_eth_dev_stop,
        .dev_close                = dpaa_eth_dev_close,
+
+       .rx_queue_setup           = dpaa_eth_rx_queue_setup,
+       .tx_queue_setup           = dpaa_eth_tx_queue_setup,
+       .rx_queue_release         = dpaa_eth_rx_queue_release,
+       .tx_queue_release         = dpaa_eth_tx_queue_release,
 };
 
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq,
+                             uint32_t fqid)
+{
+       struct qm_mcc_initfq opts;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ret = qman_reserve_fqid(fqid);
+       if (ret) {
+               DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+                            fqid, ret);
+               return -EINVAL;
+       }
+
+       DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+       ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+       if (ret) {
+               DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+                       fqid, ret);
+               return ret;
+       }
+
+       opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+                      QM_INITFQ_WE_CONTEXTA;
+
+       opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+       opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+                          QM_FQCTRL_PREFERINCACHE;
+       opts.fqd.context_a.stashing.exclusive = 0;
+       opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
+       opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+       opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+
+       /* Enable tail drop */
+       opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
+       opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
+       qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
+
+       ret = qman_init_fq(fq, 0, &opts);
+       if (ret)
+               DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+       return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+                             struct fman_if *fman_intf)
+{
+       struct qm_mcc_initfq opts;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+                            QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+       if (ret) {
+               DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
+               return ret;
+       }
+       opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+       opts.fqd.dest.channel = fman_intf->tx_channel_id;
+       opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+       opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+       opts.fqd.context_b = 0;
+       /* no tx-confirmation */
+       opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+       opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+       DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+       ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+       if (ret)
+               DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+       return ret;
+}
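dpaa_tx_queue_init() above is invoked once per lcore from dpaa_dev_init() further below, so each worker core can own a Tx FQ and enqueue without locking. A sketch of the matching application pattern follows; the 1:1 lcore-to-queue mapping is an assumption of this example, valid when the application configures one Tx queue per lcore.

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    /* illustrative: each worker transmits on the queue matching its lcore,
     * which maps onto one of the per-core Tx FQs created at init time
     */
    static uint16_t
    tx_on_own_queue(uint16_t port_id, struct rte_mbuf **pkts, uint16_t n)
    {
            return rte_eth_tx_burst(port_id, rte_lcore_id(), pkts, n);
    }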
+
 /* Initialise a network interface */
 static int
 dpaa_dev_init(struct rte_eth_dev *eth_dev)
 {
+       int num_cores, num_rx_fqs, fqid;
+       int loop, ret = 0;
        int dev_id;
        struct rte_dpaa_device *dpaa_device;
        struct dpaa_if *dpaa_intf;
+       struct fm_eth_port_cfg *cfg;
+       struct fman_if *fman_intf;
+       struct fman_if_bpool *bp, *tmp_bp;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -120,12 +298,108 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
        dev_id = dpaa_device->id.dev_id;
        dpaa_intf = eth_dev->data->dev_private;
+       cfg = &dpaa_netcfg->port_cfg[dev_id];
+       fman_intf = cfg->fman_if;
 
        dpaa_intf->name = dpaa_device->name;
 
+       /* save fman_if & cfg in the interface structure */
+       dpaa_intf->fif = fman_intf;
        dpaa_intf->ifid = dev_id;
+       dpaa_intf->cfg = cfg;
+
+       /* Initialize Rx FQs */
+       if (getenv("DPAA_NUM_RX_QUEUES"))
+               num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+       else
+               num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
 
+       /* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER Rx
+        * queues.
+        */
+       if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+               DPAA_PMD_ERR("Invalid number of RX queues");
+               return -EINVAL;
+       }
+
+       dpaa_intf->rx_queues = rte_zmalloc(NULL,
+               sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+       if (!dpaa_intf->rx_queues)
+               return -ENOMEM;
+       for (loop = 0; loop < num_rx_fqs; loop++) {
+               fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+                       DPAA_PCD_FQID_MULTIPLIER + loop;
+               ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+               if (ret)
+                       return ret;
+               dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+       }
+       dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+       /* Initialise Tx FQs. Have as many Tx FQs as there are cores */
+       num_cores = rte_lcore_count();
+       dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+               num_cores, MAX_CACHELINE);
+       if (!dpaa_intf->tx_queues)
+               return -ENOMEM;
+
+       for (loop = 0; loop < num_cores; loop++) {
+               ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+                                        fman_intf);
+               if (ret)
+                       return ret;
+               dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+       }
+       dpaa_intf->nb_tx_queues = num_cores;
+
+       DPAA_PMD_DEBUG("All frame queues created");
+
+       /* reset bpool list, initialize bpool dynamically */
+       list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+               list_del(&bp->node);
+               rte_free(bp);
+       }
+
+       /* Populate ethdev structure */
        eth_dev->dev_ops = &dpaa_devops;
+       eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+       eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+               ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
+                                               "store MAC addresses",
+                               ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+               rte_free(dpaa_intf->rx_queues);
+               rte_free(dpaa_intf->tx_queues);
+               dpaa_intf->rx_queues = NULL;
+               dpaa_intf->tx_queues = NULL;
+               dpaa_intf->nb_rx_queues = 0;
+               dpaa_intf->nb_tx_queues = 0;
+               return -ENOMEM;
+       }
+
+       /* copy the primary MAC address */
+       ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+       RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+               dpaa_device->name,
+               fman_intf->mac_addr.addr_bytes[0],
+               fman_intf->mac_addr.addr_bytes[1],
+               fman_intf->mac_addr.addr_bytes[2],
+               fman_intf->mac_addr.addr_bytes[3],
+               fman_intf->mac_addr.addr_bytes[4],
+               fman_intf->mac_addr.addr_bytes[5]);
+
+       /* Disable RX mode */
+       fman_if_discard_rx_errors(fman_intf);
+       fman_if_disable_rx(fman_intf);
+       /* Disable promiscuous mode */
+       fman_if_promiscuous_disable(fman_intf);
+       /* Disable multicast */
+       fman_if_reset_mcast_filter_table(fman_intf);
+       /* Reset interface statistics */
+       fman_if_stats_reset(fman_intf);
 
        return 0;
 }
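The Rx FQ count chosen in dpaa_dev_init() above is runtime-tunable through the DPAA_NUM_RX_QUEUES environment variable, bounded by DPAA_PCD_FQID_MULTIPLIER. A sketch of pinning it from application code, assuming it runs before EAL initialization triggers the bus probe; the value 4 is arbitrary.

    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            /* must be set before rte_eal_init() probes the dpaa bus */
            setenv("DPAA_NUM_RX_QUEUES", "4", 1);
            /* ... rte_eal_init(argc, argv), port setup, etc. ... */
            return 0;
    }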
@@ -147,6 +421,20 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
 
        dpaa_eth_dev_close(dev);
 
+       /* release configuration memory */
+       if (dpaa_intf->fc_conf)
+               rte_free(dpaa_intf->fc_conf);
+
+       rte_free(dpaa_intf->rx_queues);
+       dpaa_intf->rx_queues = NULL;
+
+       rte_free(dpaa_intf->tx_queues);
+       dpaa_intf->tx_queues = NULL;
+
+       /* free memory for storing MAC addresses */
+       rte_free(dev->data->mac_addrs);
+       dev->data->mac_addrs = NULL;
+
        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
new file mode 100644
index 0000000..c4e67f5
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -0,0 +1,370 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright 2017 NXP.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <limits.h>
+#include <sched.h>
+#include <pthread.h>
+
+#include <rte_config.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "dpaa_ethdev.h"
+#include "dpaa_rxtx.h"
+#include <rte_dpaa_bus.h>
+#include <dpaa_mempool.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <of.h>
+#include <netcfg.h>
+
+#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
+       do { \
+               (_fd)->cmd = 0; \
+               (_fd)->opaque_addr = 0; \
+               (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
+               (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
+               (_fd)->opaque |= (_mbuf)->pkt_len; \
+               (_fd)->addr = (_mbuf)->buf_physaddr; \
+               (_fd)->bpid = _bpid; \
+       } while (0)
+
+static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
+                                                       uint32_t ifid)
+{
+       struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
+       struct rte_mbuf *mbuf;
+       void *ptr;
+       uint16_t offset =
+               (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
+       uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;
+
+       DPAA_DP_LOG(DEBUG, " FD--->MBUF");
+
+       /* Ignoring case when format != qm_fd_contig */
+       ptr = rte_dpaa_mem_ptov(fd->addr);
+       /* Ignoring the case when ptr would be NULL. That is only possible
+        * in case of a corrupted packet.
+        */
+
+       mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
+       /* Prefetch the Parse results and packet data to L1 */
+       /* Prefetch the parse results and packet data to L1 */
+       rte_prefetch0((void *)((uint8_t *)ptr + offset));
+
+       mbuf->data_off = offset;
+       mbuf->data_len = length;
+       mbuf->pkt_len = length;
+
+       mbuf->port = ifid;
+       mbuf->nb_segs = 1;
+       mbuf->ol_flags = 0;
+       mbuf->next = NULL;
+       rte_mbuf_refcnt_set(mbuf, 1);
+
+       return mbuf;
+}
+
+uint16_t dpaa_eth_queue_rx(void *q,
+                          struct rte_mbuf **bufs,
+                          uint16_t nb_bufs)
+{
+       struct qman_fq *fq = q;
+       struct qm_dqrr_entry *dq;
+       uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
+       int ret;
+
+       ret = rte_dpaa_portal_init((void *)0);
+       if (ret) {
+               DPAA_PMD_ERR("Failure in affining portal");
+               return 0;
+       }
+
+       ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+                               DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+       if (ret)
+               return 0;
+
+       do {
+               dq = qman_dequeue(fq);
+               if (!dq)
+                       continue;
+               bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
+               qman_dqrr_consume(fq, dq);
+       } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
+       return num_rx;
+}
+
+static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
+{
+       int ret;
+       uint64_t buf = 0;
+       struct bm_buffer bufs;
+
+       ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
+       if (ret <= 0) {
+               DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
+               return (void *)buf;
+       }
+
+       DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+                   (uint64_t)bufs.addr, bufs.bpid);
+
+       buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
+
+       return (void *)buf;
+}
+
+static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
+                                            struct dpaa_if *dpaa_intf)
+{
+       struct rte_mbuf *dpaa_mbuf;
+
+       /* allocate pktbuffer on bpid for dpaa port */
+       dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
+       if (!dpaa_mbuf)
+               return NULL;
+
+       memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
+               ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);
+
+       /* Copy only the required fields */
+       dpaa_mbuf->data_off = mbuf->data_off;
+       dpaa_mbuf->pkt_len = mbuf->pkt_len;
+       dpaa_mbuf->ol_flags = mbuf->ol_flags;
+       dpaa_mbuf->packet_type = mbuf->packet_type;
+       dpaa_mbuf->tx_offload = mbuf->tx_offload;
+       rte_pktmbuf_free(mbuf);
+       return dpaa_mbuf;
+}
+
+/* Handle mbufs which are not segmented (non-SG) */
+static inline void
+tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
+                           struct dpaa_bp_info *bp_info,
+                           struct qm_fd *fd_arr)
+{
+       struct rte_mbuf *mi = NULL;
+
+       if (RTE_MBUF_DIRECT(mbuf)) {
+               if (rte_mbuf_refcnt_read(mbuf) > 1) {
+                       /* In case of direct mbuf and mbuf being cloned,
+                        * BMAN should _not_ release buffer.
+                        */
+                       DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+                       /* Buffer should be released by EAL */
+                       rte_mbuf_refcnt_update(mbuf, -1);
+               } else {
+                       /* In case of direct mbuf and no cloning, mbuf can be
+                        * released by BMAN.
+                        */
+                       DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+               }
+       } else {
+               /* This is data-containing core mbuf: 'mi' */
+               mi = rte_mbuf_from_indirect(mbuf);
+               if (rte_mbuf_refcnt_read(mi) > 1) {
+                       /* In case of indirect mbuf, and mbuf being cloned,
+                        * BMAN should _not_ release it and let EAL release
+                        * it through pktmbuf_free below.
+                        */
+                       DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
+               } else {
+                       /* In case of indirect mbuf, and no cloning, core mbuf
+                        * should be released by BMAN.
+                        * Increase refcnt of core mbuf so that when
+                        * pktmbuf_free is called and mbuf is released, EAL
+                        * doesn't try to release core mbuf which would have
+                        * been released by BMAN.
+                        */
+                       rte_mbuf_refcnt_update(mi, 1);
+                       DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
+               }
+               rte_pktmbuf_free(mbuf);
+       }
+}
+
+/* Handle all mbufs on dpaa BMAN managed pool */
+static inline uint16_t
+tx_on_dpaa_pool(struct rte_mbuf *mbuf,
+               struct dpaa_bp_info *bp_info,
+               struct qm_fd *fd_arr)
+{
+       DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);
+
+       if (mbuf->nb_segs == 1) {
+               /* Case for non-segmented buffers */
+               tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
+       } else {
+               DPAA_PMD_DEBUG("Number of Segments not supported");
+               return 1;
+       }
+
+       return 0;
+}
+
+/* Handle all mbufs on an external pool (non-dpaa) */
+static inline uint16_t
+tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
+                   struct qm_fd *fd_arr)
+{
+       struct dpaa_if *dpaa_intf = txq->dpaa_intf;
+       struct rte_mbuf *dmable_mbuf;
+
+       DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
+                   "Allocating an offloaded buffer");
+       dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
+       if (!dmable_mbuf) {
+               DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
+               return 1;
+       }
+
+       /* the original mbuf was freed by dpaa_get_dmable_mbuf(); build the
+        * FD from the newly allocated copy
+        */
+       DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+
+       return 0;
+}
+
+uint16_t
+dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+       struct rte_mbuf *mbuf, *mi = NULL;
+       struct rte_mempool *mp;
+       struct dpaa_bp_info *bp_info;
+       struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
+       uint32_t frames_to_send, loop, i = 0;
+       uint16_t state;
+       int ret;
+
+       ret = rte_dpaa_portal_init((void *)0);
+       if (ret) {
+               DPAA_PMD_ERR("Failure in affining portal");
+               return 0;
+       }
+
+       DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);
+
+       while (nb_bufs) {
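+               /* burst of up to MAX_TX_RING_SLOTS frames; the >> 3 test
+                * assumes MAX_TX_RING_SLOTS is 8
+                */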
+               frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
+               for (loop = 0; loop < frames_to_send; loop++, i++) {
+                       mbuf = bufs[i];
+                       if (RTE_MBUF_DIRECT(mbuf)) {
+                               mp = mbuf->pool;
+                       } else {
+                               mi = rte_mbuf_from_indirect(mbuf);
+                               mp = mi->pool;
+                       }
+
+                       bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+                       if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
+                               state = tx_on_dpaa_pool(mbuf, bp_info,
+                                                       &fd_arr[loop]);
+                               if (unlikely(state)) {
+                                       /* Set frames_to_send & nb_bufs so
+                                        * that packets are transmitted till
+                                        * previous frame.
+                                        */
+                                       frames_to_send = loop;
+                                       nb_bufs = loop;
+                                       goto send_pkts;
+                               }
+                       } else {
+                               state = tx_on_external_pool(q, mbuf,
+                                                           &fd_arr[loop]);
+                               if (unlikely(state)) {
+                                       /* Set frames_to_send & nb_bufs so
+                                        * that packets are transmitted till
+                                        * previous frame.
+                                        */
+                                       frames_to_send = loop;
+                                       nb_bufs = loop;
+                                       goto send_pkts;
+                               }
+                       }
+               }
+
+send_pkts:
+               loop = 0;
+               while (loop < frames_to_send) {
+                       loop += qman_enqueue_multi(q, &fd_arr[loop],
+                                       frames_to_send - loop);
+               }
+               nb_bufs -= frames_to_send;
+       }
+
+       DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);
+
+       return i;
+}
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+                             struct rte_mbuf **bufs __rte_unused,
+                             uint16_t nb_bufs __rte_unused)
+{
+       DPAA_DP_LOG(DEBUG, "Drop all packets");
+
+       /* Drop all incoming packets. No need to free the packets here:
+        * the rte_eth framework frees them through the tx_buffer callback
+        * when this function returns a count less than nb_bufs.
+        */
+       return 0;
+}
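The frame-descriptor packing used by DPAA_MBUF_TO_CONTIG_FD() and unpacked in dpaa_eth_fd_to_mbuf() above stores the format in the top 3 bits of the 32-bit opaque word, the buffer offset in the next 9, and the frame length in the low 20 (masks in dpaa_rxtx.h below). A self-contained check of that layout; the constants are copied from the header and the sample offset/length values are arbitrary.

    #include <assert.h>
    #include <stdint.h>

    #define DPAA_FD_OFFSET_SHIFT 20
    #define DPAA_FD_OFFSET_MASK  0x1FF00000
    #define DPAA_FD_LENGTH_MASK  0xFFFFF

    int main(void)
    {
            uint32_t data_off = 128, pkt_len = 64;
            uint32_t opaque = 0;

            /* pack, as DPAA_MBUF_TO_CONTIG_FD does */
            opaque |= data_off << DPAA_FD_OFFSET_SHIFT;
            opaque |= pkt_len;

            /* unpack, as dpaa_eth_fd_to_mbuf does */
            assert(((opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT)
                   == data_off);
            assert((opaque & DPAA_FD_LENGTH_MASK) == pkt_len);
            return 0;
    }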
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
new file mode 100644
index 0000000..45bfae8
--- /dev/null
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -0,0 +1,61 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright 2017 NXP.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_RXTX_H__
+#define __DPAA_RXTX_H__
+
+/* internal offset from where IC is copied to the packet buffer */
+#define DEFAULT_ICIOF          32
+/* IC transfer size */
+#define DEFAULT_ICSZ   48
+
+/* IC offsets from buffer header address */
+#define DEFAULT_RX_ICEOF       16
+
+/** Maximum number of frames to be dequeued in a single Rx call */
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES    63
+/* FD structure masks and offset */
+#define DPAA_FD_FORMAT_MASK 0xE0000000
+#define DPAA_FD_OFFSET_MASK 0x1FF00000
+#define DPAA_FD_LENGTH_MASK 0xFFFFF
+#define DPAA_FD_FORMAT_SHIFT 29
+#define DPAA_FD_OFFSET_SHIFT 20
+
+uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
+
+uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
+                             struct rte_mbuf **bufs __rte_unused,
+                             uint16_t nb_bufs __rte_unused);
+#endif
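Finally, dpaa_eth_queue_rx() declared above is installed as rx_pkt_burst, so it sits behind rte_eth_rx_burst(); bursts larger than DPAA_MAX_DEQUEUE_NUM_FRAMES (63) are clamped by the qman_set_vdq() call in the Rx path. A minimal polling sketch, with illustrative port and queue ids and the packet handling stubbed out as an immediate free.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void poll_port(uint16_t port_id)
    {
            struct rte_mbuf *pkts[64];
            uint16_t i, nb;

            for (;;) {
                    /* at most 63 frames come back per call on this PMD */
                    nb = rte_eth_rx_burst(port_id, 0, pkts, 64);
                    for (i = 0; i < nb; i++)
                            rte_pktmbuf_free(pkts[i]); /* app work here */
            }
    }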