net/dpaa2: support QoS or FS table entry indexing
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 03131b9..008e1c5 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016 NXP
+ *   Copyright 2016-2020 NXP
  *
  */
 
@@ -45,6 +45,7 @@ static uint64_t dev_rx_offloads_sup =
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
+               DEV_RX_OFFLOAD_RSS_HASH |
                DEV_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
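Listing DEV_RX_OFFLOAD_RSS_HASH among the non-disableable offloads means the
PMD always delivers the RSS hash in the mbuf, whether or not the application
asked for it. A minimal reader-side sketch (not part of this patch), assuming
a valid port_id:

    #include <rte_ethdev.h>

    static int
    rx_rss_hash_supported(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            /* rx_offload_capa includes the non-disableable flags above */
            if (rte_eth_dev_info_get(port_id, &info) != 0)
                    return 0;
            return !!(info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH);
    }
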
@@ -105,8 +106,6 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 
-int dpaa2_logtype_pmd;
-
 void
 rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
 {
@@ -274,6 +273,22 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_vmdq_pools = ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
+       dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
+       /* same as rx burst size for best perf */
+       dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
+
+       dev_info->default_rxportconf.nb_queues = 1;
+       dev_info->default_txportconf.nb_queues = 1;
+       dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
+       dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
+
+       if (dpaa2_svr_family == SVR_LX2160A) {
+               dev_info->speed_capa |= ETH_LINK_SPEED_25G |
+                               ETH_LINK_SPEED_40G |
+                               ETH_LINK_SPEED_50G |
+                               ETH_LINK_SPEED_100G;
+       }
+
        return 0;
 }
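The preferred-defaults block above feeds ethdev's fallback path: since DPDK
18.11, passing 0 descriptors to queue setup makes ethdev substitute
default_rxportconf.ring_size (and likewise for Tx). A hedged usage sketch,
assuming port_id and mb_pool already exist:

    struct rte_eth_conf conf = { 0 };

    rte_eth_dev_configure(port_id, 1, 1, &conf);
    /* nb_rx_desc = 0: ethdev falls back to default_rxportconf.ring_size */
    rte_eth_rx_queue_setup(port_id, 0, 0,
                           rte_eth_dev_socket_id(port_id), NULL, mb_pool);
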
 
@@ -509,8 +524,10 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                return ret;
        }
 
+#if !defined(RTE_LIBRTE_IEEE1588)
        if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
-               dpaa2_enable_ts = true;
+#endif
+       dpaa2_enable_ts = true;
 
        if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;
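With the guard above, an IEEE 1588 build enables dpaa2_enable_ts
unconditionally, while a normal build keeps timestamping opt-in through the
Rx offload flag. A sketch of the opt-in path from the application side
(illustrative only):

    struct rte_eth_conf conf = { 0 };

    conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
    /* dpaa2_eth_dev_configure() then sets dpaa2_enable_ts */
    rte_eth_dev_configure(port_id, 1, 1, &conf);
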
@@ -552,9 +569,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
-       /* update the current status */
-       dpaa2_dev_link_update(dev, 0);
-
        return 0;
 }
 
@@ -662,7 +676,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                                DPNI_CP_CONGESTION_GROUP,
                                                DPNI_QUEUE_RX,
                                                dpaa2_q->tc_index,
-                                               flow_id, &taildrop);
+                                               dpaa2_q->cgid, &taildrop);
                } else {
                        /*enabling per rx queue congestion control */
                        taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
@@ -689,7 +703,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index,
-                                       flow_id, &taildrop);
+                                       dpaa2_q->cgid, &taildrop);
                } else {
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
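Both taildrop hunks fix the same slip: at DPNI_CP_CONGESTION_GROUP scope the
index argument is the queue's congestion group id, not its flow id. A sketch
of the corrected call shape; the units and threshold fields are assumptions
based on the byte threshold visible above:

    struct dpni_taildrop taildrop;

    memset(&taildrop, 0, sizeof(taildrop));
    taildrop.enable = 1;
    taildrop.units = DPNI_CONGESTION_UNIT_BYTES;      /* assumption */
    taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
    ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                            DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
                            dpaa2_q->tc_index, dpaa2_q->cgid, &taildrop);
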
@@ -884,12 +898,12 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct qbman_fq_query_np_rslt state;
        uint32_t frame_cnt = 0;
 
-       PMD_INIT_FUNC_TRACE();
-
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
-                       DPAA2_PMD_ERR("Failure in affining portal");
+                       DPAA2_PMD_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
                        return -EINVAL;
                }
        }
@@ -899,7 +913,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
-               DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+               DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
                                rx_queue_id, frame_cnt);
        }
        return frame_cnt;
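This dev op backs rte_eth_rx_queue_count(); dropping the init-trace macro and
demoting the log to a data-path debug keeps it cheap enough to poll. Usage
sketch:

    /* number of frames waiting on the queue, or < 0 on error */
    int in_flight = rte_eth_rx_queue_count(port_id, 0);

    if (in_flight > 0)
            printf("rxq 0 backlog: %d frames\n", in_flight);
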
@@ -1288,7 +1302,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        if (frame_size > RTE_ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.offloads &=
+               dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
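The one-character operator fix above is significant: with '&=' the statement
cleared every other Rx offload bit whenever a jumbo MTU was applied, while
'|=' sets only the jumbo flag. A short illustration of the difference:

    uint64_t offloads = DEV_RX_OFFLOAD_CHECKSUM;  /* previously negotiated */

    offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;   /* adds jumbo, keeps checksum */
    /* the old 'offloads &= DEV_RX_OFFLOAD_JUMBO_FRAME;' would have wiped
     * DEV_RX_OFFLOAD_CHECKSUM as a side effect */
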
@@ -1756,6 +1770,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
        /* changing tx burst function to start enqueues */
        dev->tx_pkt_burst = dpaa2_dev_tx;
        dev->data->dev_link.link_status = state.up;
+       dev->data->dev_link.link_speed = state.rate;
 
        if (state.up)
                DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
@@ -2328,7 +2343,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
                DPAA2_PMD_ERR("Memory allocation failed for dpni device");
                return -1;
        }
-       dpni_dev->regs = rte_mcp_ptr_list[0];
+       dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
        eth_dev->process_private = (void *)dpni_dev;
 
        /* For secondary processes, the primary has done all the work */
@@ -2377,6 +2392,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        priv->num_rx_tc = attr.num_rx_tcs;
+       priv->qos_entries = attr.qos_entries;
+       priv->fs_entries = attr.fs_entries;
+       priv->dist_queues = attr.num_queues;
+
        /* only if the custom CG is enabled */
        if (attr.options & DPNI_OPT_CUSTOM_CG)
                priv->max_cgs = attr.num_cgs;
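Caching qos_entries, fs_entries and dist_queues from the DPNI attributes is
the heart of this patch: the rte_flow code can now validate a requested QoS
or FS table index against the hardware limits before programming an entry.
An illustrative bounds check; the helper name is hypothetical:

    static int
    dpaa2_check_entry_index(struct dpaa2_dev_priv *priv,
                            int is_fs, uint16_t index)
    {
            uint16_t limit = is_fs ? priv->fs_entries : priv->qos_entries;

            if (index >= limit) {
                    DPAA2_PMD_ERR("entry index %d >= table size %d",
                                  index, limit);
                    return -EINVAL;
            }
            return 0;
    }
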
@@ -2486,23 +2505,41 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
        /*Init fields w.r.t. classification*/
-       memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+       memset(&priv->extract.qos_key_extract, 0,
+               sizeof(struct dpaa2_key_extract));
        priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
        if (!priv->extract.qos_extract_param) {
                DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
                            " classificaiton ", ret);
                goto init_err;
        }
+       priv->extract.qos_key_extract.key_info.ipv4_src_offset =
+               IP_ADDRESS_OFFSET_INVALID;
+       priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
+               IP_ADDRESS_OFFSET_INVALID;
+       priv->extract.qos_key_extract.key_info.ipv6_src_offset =
+               IP_ADDRESS_OFFSET_INVALID;
+       priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
+               IP_ADDRESS_OFFSET_INVALID;
+
        for (i = 0; i < MAX_TCS; i++) {
-               memset(&priv->extract.fs_key_cfg[i], 0,
-                       sizeof(struct dpkg_profile_cfg));
-               priv->extract.fs_extract_param[i] =
+               memset(&priv->extract.tc_key_extract[i], 0,
+                       sizeof(struct dpaa2_key_extract));
+               priv->extract.tc_extract_param[i] =
                        (size_t)rte_malloc(NULL, 256, 64);
-               if (!priv->extract.fs_extract_param[i]) {
+               if (!priv->extract.tc_extract_param[i]) {
                        DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
                                     ret);
                        goto init_err;
                }
+               priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
+                       IP_ADDRESS_OFFSET_INVALID;
+               priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
+                       IP_ADDRESS_OFFSET_INVALID;
+               priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
+                       IP_ADDRESS_OFFSET_INVALID;
+               priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
+                       IP_ADDRESS_OFFSET_INVALID;
        }
 
        ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
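Parking the IPv4/IPv6 offsets at IP_ADDRESS_OFFSET_INVALID gives the
extract-building code an unambiguous "not yet placed in the key" marker,
since 0 is itself a valid key offset. A sketch of the kind of check this
enables; the key_info struct type is assumed from the field paths above:

    if (priv->extract.qos_key_extract.key_info.ipv4_src_offset !=
                    IP_ADDRESS_OFFSET_INVALID) {
            /* IPv4 source already has a slot in the key; reuse it */
    }
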
@@ -2577,10 +2614,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->process_private = NULL;
        rte_free(dpni);
 
-       for (i = 0; i < MAX_TCS; i++) {
-               if (priv->extract.fs_extract_param[i])
-                       rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
-       }
+       for (i = 0; i < MAX_TCS; i++)
+               rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
 
        if (priv->extract.qos_extract_param)
                rte_free((void *)(size_t)priv->extract.qos_extract_param);
@@ -2679,9 +2714,4 @@ RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
                DRIVER_LOOPBACK_MODE "=<int> "
                DRIVER_NO_PREFETCH_MODE "=<int>");
-RTE_INIT(dpaa2_pmd_init_log)
-{
-       dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
-       if (dpaa2_logtype_pmd >= 0)
-               rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
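
RTE_LOG_REGISTER collapses the old constructor into one line, registering
pmd.net.dpaa2 at NOTICE level. The level can still be raised at runtime with
the EAL option --log-level=pmd.net.dpaa2:debug, or programmatically:

    /* dpaa2_logtype_pmd is the id registered by the macro above */
    rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_DEBUG);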