net/dpaa: support Tx and Rx queue setup
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 4543dfc..4996daa 100644
 
 #include <rte_dpaa_bus.h>
 #include <rte_dpaa_logs.h>
+#include <dpaa_mempool.h>
 
 #include <dpaa_ethdev.h>
+#include <dpaa_rxtx.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <fsl_bman.h>
+#include <fsl_fman.h>
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -78,20 +85,104 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
        PMD_INIT_FUNC_TRACE();
 
        /* Change tx callback to the real one */
-       dev->tx_pkt_burst = NULL;
+       dev->tx_pkt_burst = dpaa_eth_queue_tx;
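+       /* Rx was left disabled in dpaa_dev_init(); enable it now */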
+       fman_if_enable_rx(dpaa_intf->fif);
 
        return 0;
 }
 
 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 {
-       dev->tx_pkt_burst = NULL;
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       fman_if_disable_rx(dpaa_intf->fif);
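+       /* Packets submitted for Tx after this point are dropped */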
+       dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 }
 
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       dpaa_eth_dev_stop(dev);
+}
+
+static int
+dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                        uint16_t nb_desc __rte_unused,
+                        unsigned int socket_id __rte_unused,
+                        const struct rte_eth_rxconf *rx_conf __rte_unused,
+                        struct rte_mempool *mp)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+
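+       /* The buffer pool is attached to the FMan port only once, on the
+        * first Rx queue setup (or when a different mempool is supplied);
+        * all Rx queues of an interface share it.
+        */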
+       if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
+               struct fman_if_ic_params icp;
+               uint32_t fd_offset;
+               uint32_t bp_size;
+
+               if (!mp->pool_data) {
+                       DPAA_PMD_ERR("Not an offloaded buffer pool!");
+                       return -EINVAL;
+               }
+               dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
+
+               memset(&icp, 0, sizeof(icp));
+               /* set ICEOF to the default value, which is 0 */
+               icp.iciof = DEFAULT_ICIOF;
+               icp.iceof = DEFAULT_RX_ICEOF;
+               icp.icsz = DEFAULT_ICSZ;
+               fman_if_set_ic_params(dpaa_intf->fif, &icp);
+
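+               /* Frame descriptor offset: mbuf headroom plus the
+                * hardware-reserved area in front of the frame.
+                */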
+               fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+               fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
+
+               /* Buffer pool size should be equal to the dataroom size */
+               bp_size = rte_pktmbuf_data_room_size(mp);
+               fman_if_set_bp(dpaa_intf->fif, mp->size,
+                              dpaa_intf->bp_info->bpid, bp_size);
+               dpaa_intf->valid = 1;
+               DPAA_PMD_INFO("if = %s, fd_offset = %d, fdoff = %d",
+                             dpaa_intf->name, fd_offset,
+                             fman_if_get_fdoff(dpaa_intf->fif));
+       }
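+       /* The Rx FQs themselves were created in dpaa_dev_init() */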
+       dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];
+
+       return 0;
+}
+
+static void
+dpaa_eth_rx_queue_release(void *rxq __rte_unused)
+{
+       PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                        uint16_t nb_desc __rte_unused,
+                        unsigned int socket_id __rte_unused,
+                        const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+
+       PMD_INIT_FUNC_TRACE();
+
+       DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
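+       /* Map the queue onto one of the per-lcore Tx FQs created in
+        * dpaa_dev_init().
+        */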
+       dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+       return 0;
+}
+
+static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
 {
        PMD_INIT_FUNC_TRACE();
 }
@@ -101,15 +192,102 @@ static struct eth_dev_ops dpaa_devops = {
        .dev_start                = dpaa_eth_dev_start,
        .dev_stop                 = dpaa_eth_dev_stop,
        .dev_close                = dpaa_eth_dev_close,
+
+       .rx_queue_setup           = dpaa_eth_rx_queue_setup,
+       .tx_queue_setup           = dpaa_eth_tx_queue_setup,
+       .rx_queue_release         = dpaa_eth_rx_queue_release,
+       .tx_queue_release         = dpaa_eth_tx_queue_release,
 };
 
+/* Initialise an Rx FQ */
+static int dpaa_rx_queue_init(struct qman_fq *fq,
+                             uint32_t fqid)
+{
+       struct qm_mcc_initfq opts;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ret = qman_reserve_fqid(fqid);
+       if (ret) {
+               DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+                            fqid, ret);
+               return -EINVAL;
+       }
+
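+       /* Rx FQs are dequeue-only for software, hence NO_ENQUEUE */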
+       DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+       ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+       if (ret) {
+               DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+                            fqid, ret);
+               return ret;
+       }
+
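+       /* Stash annotation, data and FQ-context cachelines into the
+        * cache of the dequeuing core.
+        */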
+       opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+                      QM_INITFQ_WE_CONTEXTA;
+
+       opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+       opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+                          QM_FQCTRL_PREFERINCACHE;
+       opts.fqd.context_a.stashing.exclusive = 0;
+       opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
+       opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+       opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+
+       /* Enable tail drop */
+       opts.we_mask |= QM_INITFQ_WE_TDTHRESH;
+       opts.fqd.fq_ctrl |= QM_FQCTRL_TDE;
+       qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
+
+       ret = qman_init_fq(fq, 0, &opts);
+       if (ret)
+               DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+       return ret;
+}
+
+/* Initialise a Tx FQ */
+static int dpaa_tx_queue_init(struct qman_fq *fq,
+                             struct fman_if *fman_intf)
+{
+       struct qm_mcc_initfq opts;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
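+       /* Let QMan pick a free FQID; the FQ is consumed by the FMan
+        * DC portal, not by software.
+        */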
+       ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+                            QMAN_FQ_FLAG_TO_DCPORTAL, fq);
+       if (ret) {
+               DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
+               return ret;
+       }
+       opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+                      QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
+       opts.fqd.dest.channel = fman_intf->tx_channel_id;
+       opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
+       opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
+       opts.fqd.context_b = 0;
+       /* No Tx confirmation; transmitted buffers are freed by hardware,
+        * as encoded in the fman_dealloc_bufs masks.
+        */
+       opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
+       opts.fqd.context_a.lo = fman_dealloc_bufs_mask_lo;
+       DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+       ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+       if (ret)
+               DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+       return ret;
+}
+
 /* Initialise a network interface */
 static int
 dpaa_dev_init(struct rte_eth_dev *eth_dev)
 {
+       int num_cores, num_rx_fqs, fqid;
+       int loop, ret = 0;
        int dev_id;
        struct rte_dpaa_device *dpaa_device;
        struct dpaa_if *dpaa_intf;
+       struct fm_eth_port_cfg *cfg;
+       struct fman_if *fman_intf;
+       struct fman_if_bpool *bp, *tmp_bp;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -120,12 +298,108 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
        dev_id = dpaa_device->id.dev_id;
        dpaa_intf = eth_dev->data->dev_private;
+       cfg = &dpaa_netcfg->port_cfg[dev_id];
+       fman_intf = cfg->fman_if;
 
        dpaa_intf->name = dpaa_device->name;
 
+       /* save fman_if & cfg in the interface structure */
+       dpaa_intf->fif = fman_intf;
        dpaa_intf->ifid = dev_id;
+       dpaa_intf->cfg = cfg;
+
+       /* Initialise Rx FQs; the count can be overridden with the
+        * DPAA_NUM_RX_QUEUES environment variable.
+        */
+       if (getenv("DPAA_NUM_RX_QUEUES"))
+               num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+       else
+               num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
 
+       /* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER Rx
+        * queues.
+        */
+       if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+               DPAA_PMD_ERR("Invalid number of Rx queues");
+               return -EINVAL;
+       }
+
+       dpaa_intf->rx_queues = rte_zmalloc(NULL,
+               sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+       if (!dpaa_intf->rx_queues)
+               return -ENOMEM;
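+       /* Each interface owns a disjoint DPAA_PCD_FQID_MULTIPLIER-wide
+        * FQID range starting at DPAA_PCD_FQID_START.
+        */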
+       for (loop = 0; loop < num_rx_fqs; loop++) {
+               fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+                       DPAA_PCD_FQID_MULTIPLIER + loop;
+               ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
+               if (ret)
+                       return ret;
+               dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
+       }
+       dpaa_intf->nb_rx_queues = num_rx_fqs;
+
+       /* Initialise Tx FQs; create as many Tx FQs as there are cores */
+       num_cores = rte_lcore_count();
+       dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
+               num_cores, MAX_CACHELINE);
+       if (!dpaa_intf->tx_queues)
+               return -ENOMEM;
+
+       for (loop = 0; loop < num_cores; loop++) {
+               ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
+                                        fman_intf);
+               if (ret)
+                       return ret;
+               dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
+       }
+       dpaa_intf->nb_tx_queues = num_cores;
+
+       DPAA_PMD_DEBUG("All frame queues created");
+
+       /* Reset the bpool list; bpools are attached dynamically
+        * during Rx queue setup.
+        */
+       list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
+               list_del(&bp->node);
+               rte_free(bp);
+       }
+
+       /* Populate ethdev structure */
        eth_dev->dev_ops = &dpaa_devops;
+       eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
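+       /* Tx drops everything until dev_start installs dpaa_eth_queue_tx */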
+       eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
+               ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
+                            "store MAC addresses",
+                            ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+               rte_free(dpaa_intf->rx_queues);
+               rte_free(dpaa_intf->tx_queues);
+               dpaa_intf->rx_queues = NULL;
+               dpaa_intf->tx_queues = NULL;
+               dpaa_intf->nb_rx_queues = 0;
+               dpaa_intf->nb_tx_queues = 0;
+               return -ENOMEM;
+       }
+
+       /* Copy the primary MAC address */
+       ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+       RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+               dpaa_device->name,
+               fman_intf->mac_addr.addr_bytes[0],
+               fman_intf->mac_addr.addr_bytes[1],
+               fman_intf->mac_addr.addr_bytes[2],
+               fman_intf->mac_addr.addr_bytes[3],
+               fman_intf->mac_addr.addr_bytes[4],
+               fman_intf->mac_addr.addr_bytes[5]);
+
+       /* Discard Rx errors and keep Rx disabled until dev_start */
+       fman_if_discard_rx_errors(fman_intf);
+       fman_if_disable_rx(fman_intf);
+       /* Disable promiscuous mode */
+       fman_if_promiscuous_disable(fman_intf);
+       /* Disable multicast */
+       fman_if_reset_mcast_filter_table(fman_intf);
+       /* Reset interface statistics */
+       fman_if_stats_reset(fman_intf);
 
        return 0;
 }
@@ -147,6 +421,20 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
 
        dpaa_eth_dev_close(dev);
 
+       /* release configuration memory; rte_free() is a no-op on NULL */
+       rte_free(dpaa_intf->fc_conf);
+
+       rte_free(dpaa_intf->rx_queues);
+       dpaa_intf->rx_queues = NULL;
+
+       rte_free(dpaa_intf->tx_queues);
+       dpaa_intf->tx_queues = NULL;
+
+       /* free memory for storing MAC addresses */
+       rte_free(dev->data->mac_addrs);
+       dev->data->mac_addrs = NULL;
+
        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;