net/mlx5: share Rx queue indirection table code
[dpdk.git] / drivers / net / dpaa / dpaa_ethdev.c
index c5b9ac1..b0f2023 100644 (file)
@@ -259,6 +259,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
                dev->data->scattered_rx = 1;
        }
 
+       if (!(default_q || fmc_q)) {
+               if (dpaa_fm_config(dev,
+                       eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
+                       dpaa_write_fm_config_to_file();
+                       DPAA_PMD_ERR("FM port configuration: Failed\n");
+                       return -1;
+               }
+               dpaa_write_fm_config_to_file();
+       }
+
        /* if the interrupts were configured on this devices*/
        if (intr_handle && intr_handle->fd) {
                if (dev->data->dev_conf.intr_conf.lsc != 0)
@@ -334,6 +344,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       if (!(default_q || fmc_q))
+               dpaa_write_fm_config_to_file();
+
        /* Change tx callback to the real one */
        if (dpaa_intf->cgr_tx)
                dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
@@ -351,7 +364,8 @@ static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       fman_if_disable_rx(fif);
+       if (!fif->is_shared_mac)
+               fman_if_disable_rx(fif);
        dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 }
 
@@ -721,6 +735,55 @@ static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
        return 0;
 }
 
+static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+       struct fman_if_ic_params icp;
+       uint32_t fd_offset;
+       uint32_t bp_size;
+
+       memset(&icp, 0, sizeof(icp));
+       /* set ICEOF to the default value, which is 0 */
+       icp.iciof = DEFAULT_ICIOF;
+       icp.iceof = DEFAULT_RX_ICEOF;
+       icp.icsz = DEFAULT_ICSZ;
+       fman_if_set_ic_params(dev->process_private, &icp);
+
+       fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+       fman_if_set_fdoff(dev->process_private, fd_offset);
+
+       /* Buffer pool size should be equal to Dataroom Size */
+       bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
+
+       fman_if_set_bp(dev->process_private,
+                      dpaa_intf->bp_info->mp->size,
+                      dpaa_intf->bp_info->bpid, bp_size);
+}
+
+static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
+                                            int8_t vsp_id, uint32_t bpid)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+       struct fman_if *fif = dev->process_private;
+
+       if (fif->num_profiles) {
+               if (vsp_id < 0)
+                       vsp_id = fif->base_profile_id;
+       } else {
+               if (vsp_id < 0)
+                       vsp_id = 0;
+       }
+
+       if (dpaa_intf->vsp_bpid[vsp_id] &&
+               bpid != dpaa_intf->vsp_bpid[vsp_id]) {
+               DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
+
+               return -1;
+       }
+
+       return 0;
+}
+
 static
 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                            uint16_t nb_desc,
@@ -756,6 +819,20 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
                        queue_idx, rxq->fqid);
 
+       if (!fif->num_profiles) {
+               if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+                       dpaa_intf->bp_info->mp != mp) {
+                       DPAA_PMD_WARN("Multiple pools on same interface not"
+                                     " supported");
+                       return -EINVAL;
+               }
+       } else {
+               if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+                       DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+                       return -EINVAL;
+               }
+       }
+
        /* Max packet can fit in single buffer */
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
                ;
@@ -778,36 +855,40 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                     buffsz - RTE_PKTMBUF_HEADROOM);
        }
 
-       if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
-               struct fman_if_ic_params icp;
-               uint32_t fd_offset;
-               uint32_t bp_size;
+       dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
 
-               if (!mp->pool_data) {
-                       DPAA_PMD_ERR("Not an offloaded buffer pool!");
-                       return -1;
+       /* For shared interface, it's done in kernel, skip. */
+       if (!fif->is_shared_mac)
+               dpaa_fman_if_pool_setup(dev);
+
+       if (fif->num_profiles) {
+               int8_t vsp_id = rxq->vsp_id;
+
+               if (vsp_id >= 0) {
+                       ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+                                       DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+                                       fif);
+                       if (ret) {
+                               DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+                               return ret;
+                       }
+               } else {
+                       DPAA_PMD_INFO("Base profile is associated to"
+                               " RXQ fqid:%d\r\n", rxq->fqid);
+                       if (fif->is_shared_mac) {
+                               DPAA_PMD_ERR("Fatal: Base profile is associated"
+                                            " to shared interface on DPDK.");
+                               return -EINVAL;
+                       }
+                       dpaa_intf->vsp_bpid[fif->base_profile_id] =
+                               DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
                }
-               dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
-
-               memset(&icp, 0, sizeof(icp));
-               /* set ICEOF for to the default value , which is 0*/
-               icp.iciof = DEFAULT_ICIOF;
-               icp.iceof = DEFAULT_RX_ICEOF;
-               icp.icsz = DEFAULT_ICSZ;
-               fman_if_set_ic_params(fif, &icp);
-
-               fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
-               fman_if_set_fdoff(fif, fd_offset);
-
-               /* Buffer pool size should be equal to Dataroom Size*/
-               bp_size = rte_pktmbuf_data_room_size(mp);
-               fman_if_set_bp(fif, mp->size,
-                              dpaa_intf->bp_info->bpid, bp_size);
-               dpaa_intf->valid = 1;
-               DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
-                               dpaa_intf->name, fd_offset,
-                               fman_if_get_fdoff(fif));
+       } else {
+               dpaa_intf->vsp_bpid[0] =
+                       DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
        }
+
+       dpaa_intf->valid = 1;
        DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
                fman_if_get_sg_enable(fif),
                dev->data->dev_conf.rxmode.max_rx_pkt_len);
@@ -1222,6 +1303,41 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
        return ret;
 }
 
+static int
+dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
+                        struct rte_eth_rss_conf *rss_conf)
+{
+       struct rte_eth_dev_data *data = dev->data;
+       struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!(default_q || fmc_q)) {
+               if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
+                       DPAA_PMD_ERR("FM port configuration: Failed\n");
+                       return -1;
+               }
+               eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+       } else {
+               DPAA_PMD_ERR("Function not supported\n");
+               return -ENOTSUP;
+       }
+       return 0;
+}
+
+static int
+dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+                          struct rte_eth_rss_conf *rss_conf)
+{
+       struct rte_eth_dev_data *data = dev->data;
+       struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+       /* dpaa does not support rss_key, so length should be 0 */
+       rss_conf->rss_key_len = 0;
+       rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+       return 0;
+}
+
 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
                                      uint16_t queue_id)
 {
@@ -1337,6 +1453,8 @@ static struct eth_dev_ops dpaa_devops = {
 
        .rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
        .rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
+       .rss_hash_update          = dpaa_dev_rss_hash_update,
+       .rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
 };
 
 static bool
@@ -1604,6 +1722,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
        uint32_t cgrid_tx[MAX_DPAA_CORES];
        uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
+       int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
+       int8_t vsp_id = -1;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1623,11 +1743,24 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        memset((char *)dev_rx_fqids, 0,
                sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
 
+       memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
+
        /* Initialize Rx FQ's */
        if (default_q) {
                num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
        } else if (fmc_q) {
-               num_rx_fqs = 1;
+               num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
+                                               dev_vspids,
+                                               DPAA_MAX_NUM_PCD_QUEUES);
+               if (num_rx_fqs < 0) {
+                       DPAA_PMD_ERR("%s FMC initializes failed!",
+                               dpaa_intf->name);
+                       goto free_rx;
+               }
+               if (!num_rx_fqs) {
+                       DPAA_PMD_WARN("%s is not configured by FMC.",
+                               dpaa_intf->name);
+               }
        } else {
                /* FMCLESS mode, load balance to multiple cores.*/
                num_rx_fqs = rte_lcore_count();
@@ -1702,6 +1835,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
                else
                        fqid = dev_rx_fqids[loop];
 
+               vsp_id = dev_vspids[loop];
+
                if (dpaa_intf->cgr_rx)
                        dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
 
@@ -1710,6 +1845,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
                        fqid);
                if (ret)
                        goto free_rx;
+               dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
                dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
        }
        dpaa_intf->nb_rx_queues = num_rx_fqs;
@@ -1807,19 +1943,21 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
                fman_intf->mac_addr.addr_bytes[4],
                fman_intf->mac_addr.addr_bytes[5]);
 
-
-       /* Disable RX mode */
-       fman_if_discard_rx_errors(fman_intf);
-       fman_if_disable_rx(fman_intf);
-       /* Disable promiscuous mode */
-       fman_if_promiscuous_disable(fman_intf);
-       /* Disable multicast */
-       fman_if_reset_mcast_filter_table(fman_intf);
-       /* Reset interface statistics */
-       fman_if_stats_reset(fman_intf);
-       /* Disable SG by default */
-       fman_if_set_sg(fman_intf, 0);
-       fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+       if (!fman_intf->is_shared_mac) {
+               /* Disable RX mode */
+               fman_if_discard_rx_errors(fman_intf);
+               fman_if_disable_rx(fman_intf);
+               /* Disable promiscuous mode */
+               fman_if_promiscuous_disable(fman_intf);
+               /* Disable multicast */
+               fman_if_reset_mcast_filter_table(fman_intf);
+               /* Reset interface statistics */
+               fman_if_stats_reset(fman_intf);
+               /* Disable SG by default */
+               fman_if_set_sg(fman_intf, 0);
+               fman_if_set_maxfrm(fman_intf,
+                                  RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+       }
 
        return 0;
 
@@ -2048,6 +2186,11 @@ static void __attribute__((destructor(102))) dpaa_finish(void)
                                        if (dpaa_fm_deconfig(dpaa_intf, fif))
                                                DPAA_PMD_WARN("DPAA FM "
                                                        "deconfig failed\n");
+                               if (fif->num_profiles) {
+                                       if (dpaa_port_vsp_cleanup(dpaa_intf,
+                                                                 fif))
+                                               DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
+                               }
                        }
                }
                if (is_global_init)