net/dpaa2: support Rx buffer size
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a81c734..0e7ebf4 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2020 NXP
+ *   Copyright 2016-2021 NXP
  *
  */
 
@@ -31,6 +31,8 @@
 
 #define DRIVER_LOOPBACK_MODE "drv_loopback"
 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
+#define DRIVER_TX_CONF "drv_tx_conf"
+#define DRIVER_ERROR_QUEUE  "drv_err_queue"
 #define CHECK_INTERVAL         100  /* 100ms */
 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
 
@@ -70,6 +72,9 @@ bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
 uint64_t dpaa2_timestamp_rx_dynflag;
 int dpaa2_timestamp_dynfield_offset = -1;
 
+/* Enable error queue */
+bool dpaa2_enable_err_queue;
+
 struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
@@ -363,7 +368,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
-       if (priv->tx_conf_en)
+       if (priv->flags & DPAA2_TX_CONF_ENABLE)
                tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
        else
                tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
@@ -390,6 +395,25 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
                        goto fail;
        }
 
+       if (dpaa2_enable_err_queue) {
+               priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
+                       sizeof(struct dpaa2_queue), 0);
+
+               dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+               dpaa2_q->q_storage = rte_malloc("err_dq_storage",
+                                       sizeof(struct queue_storage_info_t) *
+                                       RTE_MAX_LCORE,
+                                       RTE_CACHE_LINE_SIZE);
+               if (!dpaa2_q->q_storage)
+                       goto fail;
+
+               memset(dpaa2_q->q_storage, 0,
+                      sizeof(struct queue_storage_info_t) * RTE_MAX_LCORE);
+               for (i = 0; i < RTE_MAX_LCORE; i++)
+                       if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
+                               goto fail;
+       }
+
        for (i = 0; i < priv->nb_tx_queues; i++) {
                mc_q->eth_data = dev->data;
                mc_q->flow_id = 0xffff;
@@ -401,7 +425,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
                        goto fail_tx;
        }
 
-       if (priv->tx_conf_en) {
+       if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                /*Setup tx confirmation queues*/
                for (i = 0; i < priv->nb_tx_queues; i++) {
                        mc_q->eth_data = dev->data;
@@ -457,6 +481,14 @@ fail:
                rte_free(dpaa2_q->q_storage);
                priv->rx_vq[i--] = NULL;
        }
+
+       if (dpaa2_enable_err_queue && priv->rx_err_vq) {
+               dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+               if (dpaa2_q->q_storage)
+                       dpaa2_free_dq_storage(dpaa2_q->q_storage);
+               rte_free(dpaa2_q->q_storage);
+       }
+
        rte_free(mc_q);
        return -1;
 }
@@ -483,7 +515,7 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
                        dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                        rte_free(dpaa2_q->cscn);
                }
-               if (priv->tx_conf_en) {
+               if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                        /* cleanup tx conf queue storage */
                        for (i = 0; i < priv->nb_tx_queues; i++) {
                                dpaa2_q = (struct dpaa2_queue *)
@@ -638,6 +670,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
+       dpaa2_tm_init(dev);
+
        return 0;
 }
 
@@ -855,7 +889,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        if (tx_queue_id == 0) {
                /*Set tx-conf and error configuration*/
-               if (priv->tx_conf_en)
+               if (priv->flags & DPAA2_TX_CONF_ENABLE)
                        ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                                                            priv->token,
                                                            DPNI_CONF_AFFINE);
@@ -888,9 +922,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
                cong_notif_cfg.threshold_entry = nb_tx_desc;
                /* Notify that the queue is not congested when the data in
-                * the queue is below this thershold.
+                * the queue is below this threshold (90% of the value).
                 */
-               cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
+               cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
                cong_notif_cfg.message_ctx = 0;
                cong_notif_cfg.message_iova =
                                (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
@@ -916,7 +950,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
        dev->data->tx_queues[tx_queue_id] = dpaa2_q;
 
-       if (priv->tx_conf_en) {
+       if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
                options = options | DPNI_QUEUE_OPT_USER_CTX;
                tx_conf_cfg.user_context = (size_t)(dpaa2_q);
@@ -1160,11 +1194,31 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
                dpaa2_q->fqid = qid.fqid;
        }
 
-       /*checksum errors, send them to normal path and set it in annotation */
-       err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
-       err_cfg.errors |= DPNI_ERROR_PHE;
+       if (dpaa2_enable_err_queue) {
+               ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+                                    DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
+               if (ret) {
+                       DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
+                                               ret);
+                       return ret;
+               }
+               dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+               dpaa2_q->fqid = qid.fqid;
+               dpaa2_q->eth_data = dev->data;
+
+               err_cfg.errors = DPNI_ERROR_DISC;
+               err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+       } else {
+               /* checksum errors: send them on the normal path
+                * and flag them in the frame annotation
+                */
+               err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
 
-       err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+               /* so that packets with parse errors are not dropped */
+               err_cfg.errors |= DPNI_ERROR_PHE;
+
+               err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+       }
        err_cfg.set_frame_annotation = true;
 
        ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
@@ -1264,6 +1318,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
                return -1;
        }
 
+       dpaa2_tm_deinit(dev);
        dpaa2_flow_clean(dev);
        /* Clean the device first */
        ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
@@ -2313,12 +2368,18 @@ dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo)
 {
        struct dpaa2_queue *rxq;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+       uint16_t max_frame_length;
 
        rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
 
        qinfo->mp = rxq->mb_pool;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = rxq->nb_desc;
+       if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+                               &max_frame_length) == 0)
+               qinfo->rx_buf_size = max_frame_length;
 
        qinfo->conf.rx_free_thresh = 1;
        qinfo->conf.rx_drop_en = 1;
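
A minimal sketch (hypothetical application code, not part of this patch) of how the value filled in above surfaces through the generic ethdev API; the helper name is illustrative only:

#include <rte_ethdev.h>

/* Return the Rx buffer size reported by the PMD's rxq_info_get callback,
 * or 0 if the query fails. */
static uint16_t
dpaa2_example_rx_buf_size(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return 0;

	return qinfo.rx_buf_size;
}
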
@@ -2345,6 +2406,14 @@ dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.tx_deferred_start = 0;
 }
 
+static int
+dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
+{
+       *(const void **)ops = &dpaa2_tm_ops;
+
+       return 0;
+}
+
 static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure    = dpaa2_eth_dev_configure,
        .dev_start            = dpaa2_dev_start,
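
As a usage sketch (hypothetical application code, assuming only the generic rte_tm API), the callback above is what the ethdev layer resolves when an application queries traffic-management capabilities on a dpaa2 port:

#include <string.h>
#include <rte_tm.h>

/* rte_tm_capabilities_get() fetches the PMD's TM ops through the
 * .tm_ops_get callback registered in the ops table below. */
static int
dpaa2_example_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;

	memset(&cap, 0, sizeof(cap));
	memset(&error, 0, sizeof(error));
	return rte_tm_capabilities_get(port_id, &cap, &error);
}
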
@@ -2387,6 +2456,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .filter_ctrl          = dpaa2_dev_flow_ctrl,
        .rxq_info_get         = dpaa2_rxq_info_get,
        .txq_info_get         = dpaa2_txq_info_get,
+       .tm_ops_get           = dpaa2_tm_ops_get,
 #if defined(RTE_LIBRTE_IEEE1588)
        .timesync_enable      = dpaa2_timesync_enable,
        .timesync_disable     = dpaa2_timesync_disable,
@@ -2602,10 +2672,19 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        priv->max_vlan_filters = attr.vlan_filter_entries;
        priv->flags = 0;
 #if defined(RTE_LIBRTE_IEEE1588)
-       priv->tx_conf_en = 1;
-#else
-       priv->tx_conf_en = 0;
+       DPAA2_PMD_INFO("DPDK IEEE1588 is enabled");
+       priv->flags |= DPAA2_TX_CONF_ENABLE;
 #endif
+       /* Enabled via devargs, e.g. fslmc:dpni.1,drv_tx_conf=1 */
+       if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
+               priv->flags |= DPAA2_TX_CONF_ENABLE;
+               DPAA2_PMD_INFO("TX_CONF Enabled");
+       }
+
+       if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
+               dpaa2_enable_err_queue = 1;
+               DPAA2_PMD_INFO("Rx error queue enabled");
+       }
 
        /* Allocate memory for hardware structure for queues */
        ret = dpaa2_alloc_rx_tx_queues(eth_dev);
@@ -2638,7 +2717,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
        /* ... tx buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-       if (priv->tx_conf_en) {
+       if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                                 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
                layout.pass_timestamp = true;
@@ -2655,13 +2734,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
        /* ... tx-conf and error buffer layout ... */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-       if (priv->tx_conf_en) {
-               layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
-                                DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+       if (priv->flags & DPAA2_TX_CONF_ENABLE) {
+               layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
                layout.pass_timestamp = true;
-       } else {
-               layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        }
+       layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX_CONFIRM, &layout);
@@ -2795,7 +2872,6 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
                eth_dev->data->dev_private = (void *)dev_priv;
                /* Store a pointer to eth_dev in dev_private */
                dev_priv->eth_dev = eth_dev;
-               dev_priv->tx_conf_en = 0;
        } else {
                eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
                if (!eth_dev) {
@@ -2848,5 +2924,7 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
                DRIVER_LOOPBACK_MODE "=<int> "
-               DRIVER_NO_PREFETCH_MODE "=<int>");
+               DRIVER_NO_PREFETCH_MODE "=<int> "
+               DRIVER_TX_CONF "=<int> "
+               DRIVER_ERROR_QUEUE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
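
For illustration, a hedged sketch of exercising the new devargs from application code via hot-plug; the device name fslmc:dpni.1 is an assumed example, and the same arguments can equally be passed on the EAL command line:

#include <rte_dev.h>

/* Probe a dpni with Tx confirmation queues and the Rx error queue enabled. */
static int
dpaa2_example_probe(void)
{
	return rte_dev_probe("fslmc:dpni.1,drv_tx_conf=1,drv_err_queue=1");
}
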