ethdev: separate driver APIs
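
The driver-only parts of the ethdev API move out of the public rte_ethdev.h
into a new rte_ethdev_driver.h, so PMDs switch their include accordingly;
for dpaa2 that is the first functional hunk below. The remaining hunks are
the other dpaa2 changes captured between these two blob revisions (SPDX
license conversion, int-returning VLAN offload and stats ops, Rx queue
count, and event queue attach/detach support).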
drivers/net/dpaa2/dpaa2_ethdev.c
index 7756c4e..09a11d6 100644
@@ -1,41 +1,15 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP.
+ *   Copyright 2016 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <time.h>
 #include <net/if.h>
 
 #include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
@@ -51,6 +25,7 @@
 #include <dpaa2_hw_dpio.h>
 #include <mc/fsl_dpmng.h>
 #include "dpaa2_ethdev.h"
+#include <fsl_qbman_debug.h>
 
 struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -162,7 +137,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
        return ret;
 }
 
-static void
+static int
 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -172,6 +147,12 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        PMD_INIT_FUNC_TRACE();
 
        if (mask & ETH_VLAN_FILTER_MASK) {
+               /* VLAN Filter not available */
+               if (!priv->max_vlan_filters) {
+                       RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+                       goto next_mask;
+               }
+
                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
@@ -182,12 +163,14 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
                                ret);
        }
-
+next_mask:
        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        RTE_LOG(INFO, PMD,
                                "VLAN extend offload not supported\n");
        }
+
+       return 0;
 }
 
 static int
@@ -344,8 +327,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 
        if (eth_conf->rxmode.jumbo_frame == 1) {
                if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-                       ret = dpaa2_dev_mtu_set(dev,
-                                       eth_conf->rxmode.max_rx_pkt_len);
+                       ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+                               priv->token, eth_conf->rxmode.max_rx_pkt_len);
                        if (ret) {
                                PMD_INIT_LOG(ERR,
                                             "unable to set mtu. check config\n");
@@ -397,6 +380,25 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                return ret;
        }
 
+       /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
+        * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
+        * to 0 for LS2 in the hardware thus disabling data/annotation
+        * stashing. For LX2 this is fixed in hardware and thus hash result and
+        * parse results can be received in FD using this option.
+        */
+       if (dpaa2_svr_family == SVR_LX2160A) {
+               ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+                                      DPNI_FLCTYPE_HASH, true);
+               if (ret) {
+                       PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
+                                    ret);
+                       return ret;
+               }
+       }
+
+       if (eth_conf->rxmode.hw_vlan_filter)
+               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+
        /* update the current status */
        dpaa2_dev_link_update(dev, 0);
 
@@ -416,7 +418,6 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-       struct mc_soc_version mc_plat_info = {0};
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;
        uint8_t options = 0;
@@ -426,8 +427,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
-                    dev, rx_queue_id, mb_pool, rx_conf);
+       PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
+                   dev, rx_queue_id, mb_pool, rx_conf);
 
        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
@@ -448,18 +449,20 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        /*if ls2088 or rev2 device, enable the stashing */
 
-       if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
-               PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
-
-       if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
+       if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* 00 00 00 - last 6 bit represent annotation, context stashing,
-                * data stashing setting 01 01 00 (0x14) to enable
-                * 1 line data, 1 line annotation
+                * data stashing setting 01 01 00 (0x14)
+                * (in following order ->DS AS CS)
+                * to enable 1 line data, 1 line annotation.
+                * For LX2, this setting should be 01 00 00 (0x10)
                 */
-               cfg.flc.value |= 0x14;
+               if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
+                       cfg.flc.value |= 0x10;
+               else
+                       cfg.flc.value |= 0x14;
        }
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
@@ -476,8 +479,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                taildrop.threshold = CONG_THRESHOLD_RX_Q;
                taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
                taildrop.oal = CONG_RX_OAL;
-               PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
-                            rx_queue_id);
+               PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
+                           rx_queue_id);
                ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index, flow_id, &taildrop);
@@ -512,8 +515,10 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_FUNC_TRACE();
 
        /* Return if queue already configured */
-       if (dpaa2_q->flow_id != 0xffff)
+       if (dpaa2_q->flow_id != 0xffff) {
+               dev->data->tx_queues[tx_queue_id] = dpaa2_q;
                return 0;
+       }
 
        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
@@ -590,6 +595,37 @@ dpaa2_dev_tx_queue_release(void *q __rte_unused)
        PMD_INIT_FUNC_TRACE();
 }
 
+static uint32_t
+dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       int32_t ret;
+       struct dpaa2_dev_priv *priv = dev->data->dev_private;
+       struct dpaa2_queue *dpaa2_q;
+       struct qbman_swp *swp;
+       struct qbman_fq_query_np_rslt state;
+       uint32_t frame_cnt = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+                       return -EINVAL;
+               }
+       }
+       swp = DPAA2_PER_LCORE_PORTAL;
+
+       dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+
+       if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
+               frame_cnt = qbman_fq_state_frame_count(&state);
+               RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+                       rx_queue_id, frame_cnt);
+       }
+       return frame_cnt;
+}
+
 static const uint32_t *
 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
 {
@@ -651,7 +687,7 @@ dpaa2_interrupt_handler(void *param)
                dpaa2_dev_link_update(dev, 0);
                /* calling all the apps registered for link status event */
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                                             NULL, NULL);
+                                             NULL);
        }
 out:
        ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
@@ -753,9 +789,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
                             "code = %d\n", ret);
                return ret;
        }
-       /* VLAN Offload Settings */
-       if (priv->max_vlan_filters)
-               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
        /* if the interrupts were configured on this devices*/
        if (intr_handle && (intr_handle->fd) &&
@@ -956,7 +989,8 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+                               + VLAN_TAG_SIZE;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -974,16 +1008,18 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;
 
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
-                                       mtu + ETH_VLAN_HLEN);
+                                       frame_size);
        if (ret) {
                PMD_DRV_LOG(ERR, "setting the max frame length failed");
                return -1;
        }
-       PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
+       PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
        return 0;
 }
 
@@ -1061,7 +1097,7 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
                        "error: Setting the MAC ADDR failed %d\n", ret);
 }
 static
-void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
                         struct rte_eth_stats *stats)
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -1076,12 +1112,12 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
 
        if (!dpni) {
                RTE_LOG(ERR, PMD, "dpni is NULL\n");
-               return;
+               return -EINVAL;
        }
 
        if (!stats) {
                RTE_LOG(ERR, PMD, "stats is NULL\n");
-               return;
+               return -EINVAL;
        }
 
        /*Get Counters from page_0*/
@@ -1116,11 +1152,11 @@ void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
        stats->oerrors = value.page_2.egress_discarded_frames;
        stats->imissed = value.page_2.ingress_nobuffer_discards;
 
-       return;
+       return 0;
 
 err:
        RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
-       return;
+       return retcode;
 };
 
 static int
@@ -1634,6 +1670,75 @@ dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
+               int eth_rx_queue_id,
+               uint16_t dpcon_id,
+               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+       struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+       uint8_t flow_id = dpaa2_ethq->flow_id;
+       struct dpni_queue cfg;
+       uint8_t options;
+       int ret;
+
+       if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+               dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
+       else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
+               dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
+       else
+               return -EINVAL;
+
+       memset(&cfg, 0, sizeof(struct dpni_queue));
+       options = DPNI_QUEUE_OPT_DEST;
+       cfg.destination.type = DPNI_DEST_DPCON;
+       cfg.destination.id = dpcon_id;
+       cfg.destination.priority = queue_conf->ev.priority;
+
+       if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+               options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
+               cfg.destination.hold_active = 1;
+       }
+
+       options |= DPNI_QUEUE_OPT_USER_CTX;
+       cfg.user_context = (uint64_t)(dpaa2_ethq);
+
+       ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+                            dpaa2_ethq->tc_index, flow_id, options, &cfg);
+       if (ret) {
+               RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+               return ret;
+       }
+
+       memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
+
+       return 0;
+}
+
+int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
+               int eth_rx_queue_id)
+{
+       struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
+       struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
+       struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
+       uint8_t flow_id = dpaa2_ethq->flow_id;
+       struct dpni_queue cfg;
+       uint8_t options;
+       int ret;
+
+       memset(&cfg, 0, sizeof(struct dpni_queue));
+       options = DPNI_QUEUE_OPT_DEST;
+       cfg.destination.type = DPNI_DEST_NONE;
+
+       ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
+                            dpaa2_ethq->tc_index, flow_id, options, &cfg);
+       if (ret)
+               RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+
+       return ret;
+}
+
 static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure    = dpaa2_eth_dev_configure,
        .dev_start            = dpaa2_dev_start,
@@ -1663,6 +1768,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .rx_queue_release  = dpaa2_dev_rx_queue_release,
        .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
        .tx_queue_release  = dpaa2_dev_tx_queue_release,
+       .rx_queue_count       = dpaa2_dev_rx_queue_count,
        .flow_ctrl_get        = dpaa2_flow_ctrl_get,
        .flow_ctrl_set        = dpaa2_flow_ctrl_set,
        .mac_addr_add         = dpaa2_dev_add_mac_addr,
@@ -1738,7 +1844,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        priv->nb_tx_queues = attr.num_tx_tcs;
 
        PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
-                   priv->num_tc, priv->nb_rx_queues, priv->nb_tx_queues);
+                   priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);
 
        priv->hw = dpni_dev;
        priv->hw_id = hw_id;
@@ -1799,12 +1905,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        eth_dev->dev_ops = &dpaa2_ethdev_ops;
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
        eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
        rte_fslmc_vfio_dmamap();
 
+       RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
        return 0;
 init_err:
        dpaa2_dev_uninit(eth_dev);
@@ -1865,6 +1971,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
 
+       RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
        return 0;
 }
 
@@ -1901,6 +2008,9 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;
 
+       if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
+               eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+
        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0)
@@ -1928,6 +2038,7 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
 }
 
 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
+       .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_ETH,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
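
The new .rx_queue_count hook is reached through the public API's
rte_eth_rx_queue_count(). A minimal usage sketch, assuming an already
configured and started port (port_id and rxq are placeholder values):

#include <stdio.h>
#include <rte_ethdev.h>

/* Log the Rx backlog of one queue. The call dispatches to the PMD's
 * .rx_queue_count op -- dpaa2_dev_rx_queue_count() above -- which reads
 * the frame count of the backing QBMAN frame queue.
 */
static void
log_rx_backlog(uint16_t port_id, uint16_t rxq)
{
	int cnt = rte_eth_rx_queue_count(port_id, rxq);

	if (cnt < 0)
		printf("rx_queue_count failed on port %u: %d\n",
		       port_id, cnt);
	else
		printf("port %u rxq %u: %d frames pending\n",
		       port_id, rxq, cnt);
}

Note the eth_dev_ops slot is typed uint32_t, so the -EINVAL taken on a
portal-affinity failure only reads as negative again through the int
return of the inline wrapper; checking cnt < 0 as above is the usual
caller-side pattern.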
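
dpaa2_eth_eventq_attach() and dpaa2_eth_eventq_detach() are not wired into
eth_dev_ops; they are exported for the dpaa2 event Rx adapter, which binds
an ethdev Rx queue to a DPCON channel. A hedged sketch of the attach call,
assuming the prototypes come from dpaa2_ethdev.h and that eth_dev,
rx_queue_id and dpcon_id are placeholders supplied by the caller:

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include "dpaa2_ethdev.h"

static int
bind_rxq_to_dpcon(struct rte_eth_dev *eth_dev, int rx_queue_id,
		  uint16_t dpcon_id)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;

	memset(&qconf, 0, sizeof(qconf));
	/* ATOMIC selects dpaa2_dev_process_atomic_event as the queue
	 * callback and makes the attach path request
	 * DPNI_QUEUE_OPT_HOLD_ACTIVE; PARALLEL would select the
	 * parallel callback instead.
	 */
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				       dpcon_id, &qconf);
}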