common/sfc_efx/base: implement Tx control path for Riverhead
[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
index 492b658..02daa4d 100644 (file)
@@ -64,7 +64,7 @@ static uint64_t dev_tx_offloads_nodis =
                DEV_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
-enum pmd_dpaa2_ts dpaa2_enable_ts;
+bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
 
 struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -106,12 +106,6 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 
-void
-rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
-{
-       dpaa2_enable_ts = enable;
-}
-
 static int
 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
@@ -145,7 +139,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = dev->process_private;
-       int ret;
+       int ret = 0;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -153,7 +147,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                /* VLAN Filter not available */
                if (!priv->max_vlan_filters) {
                        DPAA2_PMD_INFO("VLAN filter not available");
-                       goto next_mask;
+                       return -ENOTSUP;
                }
 
                if (dev->data->dev_conf.rxmode.offloads &
@@ -166,14 +160,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                if (ret < 0)
                        DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
        }
-next_mask:
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_VLAN_EXTEND)
-                       DPAA2_PMD_INFO("VLAN extend offload not supported");
-       }
 
-       return 0;
+       return ret;
 }
 
 static int
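
With this change, a request to toggle VLAN filtering on a DPNI that has no VLAN filter resources now fails with -ENOTSUP instead of silently falling through, and the dead ETH_VLAN_EXTEND branch (which could only log) is dropped. A minimal caller-side sketch, assuming port_id is a configured dpaa2 port; note that rte_eth_dev_set_vlan_offload() takes the full desired VLAN offload state:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: enable VLAN filtering and surface the new error code.
     * -ENOTSUP now propagates when the DPNI exposes no VLAN filters.
     */
    static int
    enable_vlan_filter(uint16_t port_id)
    {
            int ret = rte_eth_dev_set_vlan_offload(port_id,
                                                   ETH_VLAN_FILTER_OFFLOAD);

            if (ret == -ENOTSUP)
                    printf("port %u: VLAN filtering not available\n", port_id);
            return ret;
    }
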
@@ -253,8 +241,6 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        PMD_INIT_FUNC_TRACE();
 
-       dev_info->if_index = priv->hw_id;
-
        dev_info->max_mac_addrs = priv->max_mac_filters;
        dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
@@ -292,6 +278,77 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        return 0;
 }
 
+static int
+dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
+                       __rte_unused uint16_t queue_id,
+                       struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+       int ret = -EINVAL;
+       unsigned int i;
+       const struct burst_info {
+               uint64_t flags;
+               const char *output;
+       } rx_offload_map[] = {
+                       {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+                       {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+                       {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+                       {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+                       {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
+                       {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+                       {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
+                       {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+       };
+
+       /* Update Rx offload info */
+       for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+               if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                               rx_offload_map[i].output);
+                       ret = 0;
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int
+dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
+                       __rte_unused uint16_t queue_id,
+                       struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+       int ret = -EINVAL;
+       unsigned int i;
+       const struct burst_info {
+               uint64_t flags;
+               const char *output;
+       } tx_offload_map[] = {
+                       {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+                       {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+                       {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+                       {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+       };
+
+       /* Update Tx offload info */
+       for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+               if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                               tx_offload_map[i].output);
+                       ret = 0;
+                       break;
+               }
+       }
+       return ret;
+}
+
 static int
 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 {
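
These two callbacks back the generic rte_eth_rx_burst_mode_get()/rte_eth_tx_burst_mode_get() API. Note that the loop breaks on the first enabled offload it finds, so mode->info names only one offload, and -EINVAL is returned when none of the listed offloads is set. A usage sketch from the application side, assuming queue 0 is configured:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: print the burst-mode strings the PMD fills in above. */
    static void
    show_burst_modes(uint16_t port_id)
    {
            struct rte_eth_burst_mode mode;

            if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("Rx burst mode:%s\n", mode.info);
            if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("Tx burst mode:%s\n", mode.info);
    }
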
@@ -453,7 +510,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        int rx_l4_csum_offload = false;
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
-       int ret;
+       int ret, tc_index;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -493,12 +550,16 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        }
 
        if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
-               ret = dpaa2_setup_flow_dist(dev,
-                               eth_conf->rx_adv_conf.rss_conf.rss_hf);
-               if (ret) {
-                       DPAA2_PMD_ERR("Unable to set flow distribution."
-                                     "Check queue config");
-                       return ret;
+               for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+                       ret = dpaa2_setup_flow_dist(dev,
+                                       eth_conf->rx_adv_conf.rss_conf.rss_hf,
+                                       tc_index);
+                       if (ret) {
+                               DPAA2_PMD_ERR(
+                                       "Unable to set flow distribution on tc%d. "
+                                       "Check queue config", tc_index);
+                               return ret;
+                       }
                }
        }
 
@@ -527,7 +588,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 #if !defined(RTE_LIBRTE_IEEE1588)
        if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
 #endif
-       dpaa2_enable_ts = true;
+               dpaa2_enable_ts[dev->data->port_id] = true;
 
        if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;
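
dpaa2_enable_ts is now a per-port array indexed by port_id, so enabling DEV_RX_OFFLOAD_TIMESTAMP (or building with RTE_LIBRTE_IEEE1588) on one port no longer turns timestamping on for every dpaa2 port in the process; the removed rte_pmd_dpaa2_set_timestamp() global setter goes away with it. A sketch of how a per-packet path would consult the flag (store_timestamp() is a hypothetical helper standing in for the driver's annotation code):

    #include <stdbool.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    extern bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];

    void store_timestamp(struct rte_mbuf *m, uint64_t hw_ts); /* hypothetical */

    /* Hypothetical hook: annotate only when the receiving port opted in. */
    static inline void
    rx_maybe_timestamp(struct rte_mbuf *m, uint64_t hw_ts)
    {
            if (!dpaa2_enable_ts[m->port])
                    return;
            store_timestamp(m, hw_ts);
    }
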
@@ -580,7 +641,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         unsigned int socket_id __rte_unused,
-                        const struct rte_eth_rxconf *rx_conf __rte_unused,
+                        const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mb_pool)
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
@@ -597,6 +658,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
                        dev, rx_queue_id, mb_pool, rx_conf);
 
+       /* Rx deferred start is not supported */
+       if (rx_conf->rx_deferred_start) {
+               DPAA2_PMD_ERR("%p:Rx deferred start not supported",
+                               (void *)dev);
+               return -EINVAL;
+       }
+
        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv,
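
Rx deferred start is now rejected at setup time rather than being accepted and ignored. An application sketch that trips the new check, assuming mp is an existing mempool and the port is otherwise configured:

    #include <rte_ethdev.h>
    #include <rte_mempool.h>

    /* Sketch: rx_deferred_start = 1 now makes queue setup fail with
     * -EINVAL on dpaa2 instead of configuring a queue that starts anyway.
     */
    static int
    setup_deferred_rxq(uint16_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_dev_info info;
            struct rte_eth_rxconf rx_conf;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &info);
            if (ret != 0)
                    return ret;

            rx_conf = info.default_rxconf;
            rx_conf.rx_deferred_start = 1;

            return rte_eth_rx_queue_setup(port_id, 0, 512,
                                          rte_eth_dev_socket_id(port_id),
                                          &rx_conf, mp);
    }
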
@@ -607,6 +675,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
        dpaa2_q->bp_array = rte_dpaa2_bpid_info;
+       dpaa2_q->nb_desc = UINT16_MAX;
+       dpaa2_q->offloads = rx_conf->offloads;
 
        /*Get the flow id from given VQ id*/
        flow_id = dpaa2_q->flow_id;
@@ -659,7 +729,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                struct dpni_taildrop taildrop;
 
                taildrop.enable = 1;
-
+               dpaa2_q->nb_desc = nb_rx_desc;
                /* A private CGR will use the tail drop length as nb_rx_desc.
                 * For the remaining cases we can use standard byte-based tail drop.
                 * There is no HW restriction, but the number of CGRs is limited,
@@ -723,9 +793,9 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 static int
 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t tx_queue_id,
-                        uint16_t nb_tx_desc __rte_unused,
+                        uint16_t nb_tx_desc,
                         unsigned int socket_id __rte_unused,
-                        const struct rte_eth_txconf *tx_conf __rte_unused)
+                        const struct rte_eth_txconf *tx_conf)
 {
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
@@ -742,6 +812,16 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
+       /* Tx deferred start is not supported */
+       if (tx_conf->tx_deferred_start) {
+               DPAA2_PMD_ERR("%p:Tx deferred start not supported",
+                               (void *)dev);
+               return -EINVAL;
+       }
+
+       dpaa2_q->nb_desc = UINT16_MAX;
+       dpaa2_q->offloads = tx_conf->offloads;
+
        /* Return if queue already configured */
        if (dpaa2_q->flow_id != 0xffff) {
                dev->data->tx_queues[tx_queue_id] = dpaa2_q;
@@ -755,11 +835,11 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        flow_id = 0;
 
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
-                            tc_id, flow_id, options, &tx_flow_cfg);
+                       tc_id, flow_id, options, &tx_flow_cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting the tx flow: "
-                             "tc_id=%d, flow=%d err=%d",
-                             tc_id, flow_id, ret);
+                       "tc_id=%d, flow=%d err=%d",
+                       tc_id, flow_id, ret);
                return -1;
        }
 
@@ -795,12 +875,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
                struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
 
+               dpaa2_q->nb_desc = nb_tx_desc;
+
                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
-               cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
+               cong_notif_cfg.threshold_entry = nb_tx_desc;
                /* Notify that the queue is not congested when the data in
                 * the queue is below this threshold.
                 */
-               cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
+               cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
                cong_notif_cfg.message_ctx = 0;
                cong_notif_cfg.message_iova =
                                (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
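
The congestion-notification window now scales with the requested ring size instead of the fixed CONG_ENTER_TX_THRESHOLD/CONG_EXIT_TX_THRESHOLD constants: the queue asserts congestion once nb_tx_desc frames are queued and deasserts it when the backlog drains 24 frames below that, so Tx behaves as if the ring really had nb_tx_desc entries (a ring shorter than 24 descriptors would underflow the exit threshold, which the ethdev descriptor limits are presumably expected to rule out). A worked example of the arithmetic:

    /* With, say, nb_tx_desc = 512:
     *   threshold_entry = 512  -> CSCN asserts at 512 queued frames
     *   threshold_exit  = 488  -> CSCN clears below 488 frames
     * The 24-frame hysteresis keeps the congestion state from toggling
     * on every enqueue/dequeue around the boundary.
     */
    uint16_t nb_tx_desc = 512;
    uint32_t threshold_entry = nb_tx_desc;      /* 512 */
    uint32_t threshold_exit  = nb_tx_desc - 24; /* 488 */
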
@@ -981,8 +1063,7 @@ dpaa2_interrupt_handler(void *param)
                clear = DPNI_IRQ_EVENT_LINK_CHANGED;
                dpaa2_dev_link_update(dev, 0);
                /* calling all the apps registered for link status event */
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                                             NULL);
+               rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 out:
        ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
@@ -1984,22 +2065,31 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
 {
        struct rte_eth_dev_data *data = dev->data;
+       struct dpaa2_dev_priv *priv = data->dev_private;
        struct rte_eth_conf *eth_conf = &data->dev_conf;
-       int ret;
+       int ret, tc_index;
 
        PMD_INIT_FUNC_TRACE();
 
        if (rss_conf->rss_hf) {
-               ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
-               if (ret) {
-                       DPAA2_PMD_ERR("Unable to set flow dist");
-                       return ret;
+               for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+                       ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
+                               tc_index);
+                       if (ret) {
+                               DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
+                                       tc_index);
+                               return ret;
+                       }
                }
        } else {
-               ret = dpaa2_remove_flow_dist(dev, 0);
-               if (ret) {
-                       DPAA2_PMD_ERR("Unable to remove flow dist");
-                       return ret;
+               for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
+                       ret = dpaa2_remove_flow_dist(dev, tc_index);
+                       if (ret) {
+                               DPAA2_PMD_ERR(
+                                       "Unable to remove flow dist on tc%d",
+                                       tc_index);
+                               return ret;
+                       }
                }
        }
        eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
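
Both the configure-time path and this runtime update now program flow distribution once per Rx traffic class instead of only on TC 0, and clearing rss_hf removes distribution from every TC. Nothing changes for callers; a runtime hash update such as the sketch below simply fans out to all priv->num_rx_tc classes inside the PMD:

    #include <stddef.h>
    #include <rte_ethdev.h>

    /* Sketch: switch the port to IP-address-based RSS at runtime. */
    static int
    set_ip_rss(uint16_t port_id)
    {
            struct rte_eth_rss_conf rss_conf = {
                    .rss_key = NULL,      /* keep the current hash key */
                    .rss_hf  = ETH_RSS_IP,
            };

            return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
    }
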
@@ -2169,6 +2259,43 @@ dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
        return ret;
 }
 
+static void
+dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct dpaa2_queue *rxq;
+
+       rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mb_pool;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_desc;
+
+       qinfo->conf.rx_free_thresh = 1;
+       qinfo->conf.rx_drop_en = 1;
+       qinfo->conf.rx_deferred_start = 0;
+       qinfo->conf.offloads = rxq->offloads;
+}
+
+static void
+dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct dpaa2_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_desc;
+       qinfo->conf.tx_thresh.pthresh = 0;
+       qinfo->conf.tx_thresh.hthresh = 0;
+       qinfo->conf.tx_thresh.wthresh = 0;
+
+       qinfo->conf.tx_free_thresh = 0;
+       qinfo->conf.tx_rs_thresh = 0;
+       qinfo->conf.offloads = txq->offloads;
+       qinfo->conf.tx_deferred_start = 0;
+}
+
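
Because queue setup now records nb_desc and offloads on each dpaa2_queue, the generic queue-introspection API returns meaningful data for dpaa2 ports; nb_desc reads back as UINT16_MAX for queues with no taildrop or congestion limit. A usage sketch:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: read back what the callbacks above report for queue 0. */
    static void
    dump_queue_info(uint16_t port_id)
    {
            struct rte_eth_rxq_info rxq;
            struct rte_eth_txq_info txq;

            if (rte_eth_rx_queue_info_get(port_id, 0, &rxq) == 0)
                    printf("rxq0: %u desc, offloads 0x%" PRIx64 "\n",
                           rxq.nb_desc, rxq.conf.offloads);
            if (rte_eth_tx_queue_info_get(port_id, 0, &txq) == 0)
                    printf("txq0: %u desc, offloads 0x%" PRIx64 "\n",
                           txq.nb_desc, txq.conf.offloads);
    }
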
 static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure    = dpaa2_eth_dev_configure,
        .dev_start            = dpaa2_dev_start,
@@ -2199,7 +2326,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .rx_queue_release  = dpaa2_dev_rx_queue_release,
        .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
        .tx_queue_release  = dpaa2_dev_tx_queue_release,
-       .rx_queue_count       = dpaa2_dev_rx_queue_count,
+       .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
+       .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
        .flow_ctrl_get        = dpaa2_flow_ctrl_get,
        .flow_ctrl_set        = dpaa2_flow_ctrl_set,
        .mac_addr_add         = dpaa2_dev_add_mac_addr,
@@ -2208,6 +2336,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .rss_hash_update      = dpaa2_dev_rss_hash_update,
        .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
        .filter_ctrl          = dpaa2_dev_flow_ctrl,
+       .rxq_info_get         = dpaa2_rxq_info_get,
+       .txq_info_get         = dpaa2_txq_info_get,
 #if defined(RTE_LIBRTE_IEEE1588)
        .timesync_enable      = dpaa2_timesync_enable,
        .timesync_disable     = dpaa2_timesync_disable,
@@ -2352,6 +2482,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
                 * plugged.
                 */
                eth_dev->dev_ops = &dpaa2_ethdev_ops;
+               eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
                if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
                        eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
                else if (dpaa2_get_devargs(dev->devargs,
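
This follows the ethdev rework that moved rx_queue_count from eth_dev_ops to a fast-path pointer on struct rte_eth_dev itself, which is why the ops entry disappears above while the probe path gains the direct assignment. Applications are unaffected:

    #include <rte_ethdev.h>

    /* Sketch: the public wrapper is unchanged; it now dispatches via
     * dev->rx_queue_count instead of dev->dev_ops->rx_queue_count.
     * Returns the number of used descriptors or a negative errno.
     */
    static inline int
    used_rx_descs(uint16_t port_id, uint16_t queue_id)
    {
            return rte_eth_rx_queue_count(port_id, queue_id);
    }
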
@@ -2392,6 +2523,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        priv->num_rx_tc = attr.num_rx_tcs;
+       priv->qos_entries = attr.qos_entries;
+       priv->fs_entries = attr.fs_entries;
+       priv->dist_queues = attr.num_queues;
+
        /* only if the custom CG is enabled */
        if (attr.options & DPNI_OPT_CUSTOM_CG)
                priv->max_cgs = attr.num_cgs;
@@ -2610,11 +2745,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->process_private = NULL;
        rte_free(dpni);
 
-       for (i = 0; i < MAX_TCS; i++) {
-               if (priv->extract.tc_extract_param[i])
-                       rte_free((void *)
-                               (size_t)priv->extract.tc_extract_param[i]);
-       }
+       for (i = 0; i < MAX_TCS; i++)
+               rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
 
        if (priv->extract.qos_extract_param)
                rte_free((void *)(size_t)priv->extract.qos_extract_param);