X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=52eb6df31052955351e13542ab6f508ce103ab98;hb=66689dc09f0cf92af03abcba58b7474fa8ac847e;hp=73d17f7b3c61bac25b0f96fb907be9793413c1a6;hpb=d61138d4f0e2927cc1d0fef6d810fa7f5d1161a1;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 73d17f7b3c..52eb6df310 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,6 +18,7 @@
 #include <rte_dev.h>
 #include <rte_fslmc.h>
 #include <rte_flow_driver.h>
+#include "rte_dpaa2_mempool.h"
 
 #include "dpaa2_pmd_logs.h"
 #include <fslmc_vfio.h>
@@ -74,6 +75,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
 /* Enable error queue */
 bool dpaa2_enable_err_queue;
 
+#define MAX_NB_RX_DESC		11264
+int total_nb_rx_desc;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -143,7 +147,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	PMD_INIT_FUNC_TRACE();
 
 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
-		/* VLAN Filter not avaialble */
+		/* VLAN Filter not available */
 		if (!priv->max_vlan_filters) {
 			DPAA2_PMD_INFO("VLAN filter not available");
 			return -ENOTSUP;
@@ -254,6 +258,7 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
 			RTE_ETH_LINK_SPEED_2_5G |
 			RTE_ETH_LINK_SPEED_10G;
+	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
@@ -394,6 +399,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	if (dpaa2_enable_err_queue) {
 		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
 			sizeof(struct dpaa2_queue), 0);
+		if (!priv->rx_err_vq)
+			goto fail;
 
 		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
 		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
@@ -503,8 +510,7 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
 			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			if (dpaa2_q->q_storage)
-				rte_free(dpaa2_q->q_storage);
+			rte_free(dpaa2_q->q_storage);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -663,6 +669,30 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
+	if (eth_conf->lpbk_mode) {
+		ret = dpaa2_dev_recycle_config(dev);
+		if (ret) {
+			DPAA2_PMD_ERR("Error to configure %s to recycle port.",
+				dev->data->name);
+
+			return ret;
+		}
+	} else {
+		/** User may disable loopback mode by calling
+		 * "dev_configure" with lpbk_mode cleared.
+		 * No matter the port was configured recycle or not,
+		 * recycle de-configure is called here.
+		 * If port is not recycled, the de-configure will return directly.
+		 */
+		ret = dpaa2_dev_recycle_deconfig(dev);
+		if (ret) {
+			DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
+				dev->data->name);
+
+			return ret;
+		}
+	}
+
 	dpaa2_tm_init(dev);
 
 	return 0;
@@ -693,6 +723,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
 			dev, rx_queue_id, mb_pool, rx_conf);
 
+	total_nb_rx_desc += nb_rx_desc;
+	if (total_nb_rx_desc > MAX_NB_RX_DESC) {
+		DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
+			       MAX_NB_RX_DESC);
+		DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
+	}
+
 	/* Rx deferred start is not supported */
 	if (rx_conf->rx_deferred_start) {
 		DPAA2_PMD_ERR("%p:Rx deferred start not supported",
@@ -701,9 +738,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+			ret = rte_dpaa2_bpid_info_init(mb_pool);
+			if (ret)
+				return ret;
+		}
 		bpid = mempool_to_bpid(mb_pool);
-		ret = dpaa2_attach_bp_list(priv,
-				rte_dpaa2_bpid_info[bpid].bp_list);
+		ret = dpaa2_attach_bp_list(priv, dpni,
+				rte_dpaa2_bpid_info[bpid].bp_list);
 		if (ret)
 			return ret;
 	}
@@ -841,6 +883,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct dpni_queue tx_conf_cfg;
 	struct dpni_queue tx_flow_cfg;
 	uint8_t options = 0, flow_id;
+	uint16_t channel_id;
 	struct dpni_queue_id qid;
 	uint32_t tc_id;
 	int ret;
@@ -866,20 +909,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
 
-	tc_id = tx_queue_id;
-	flow_id = 0;
-
-	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
-			tc_id, flow_id, options, &tx_flow_cfg);
-	if (ret) {
-		DPAA2_PMD_ERR("Error in setting the tx flow: "
-			"tc_id=%d, flow=%d err=%d",
-			tc_id, flow_id, ret);
-		return -1;
-	}
-
-	dpaa2_q->flow_id = flow_id;
-
 	if (tx_queue_id == 0) {
 		/*Set tx-conf and error configuration*/
 		if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -896,10 +925,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			return -1;
 		}
 	}
+
+	tc_id = tx_queue_id % priv->num_tx_tc;
+	channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
+	flow_id = 0;
+
+	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+			((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
+	if (ret) {
+		DPAA2_PMD_ERR("Error in setting the tx flow: "
+			"tc_id=%d, flow=%d err=%d",
+			tc_id, flow_id, ret);
+		return -1;
+	}
+
+	dpaa2_q->flow_id = flow_id;
+	dpaa2_q->tc_index = tc_id;
 
 	ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
-			DPNI_QUEUE_TX, dpaa2_q->tc_index,
+			DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
 			dpaa2_q->flow_id, &tx_flow_cfg, &qid);
 	if (ret) {
 		DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
 		return -1;
@@ -915,7 +960,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
 		cong_notif_cfg.threshold_entry = nb_tx_desc;
 		/* Notify that the queue is not congested when the data in
-		 * the queue is below this thershold.(90% of value)
+		 * the queue is below this threshold.(90% of value)
 		 */
 		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
 		cong_notif_cfg.message_ctx = 0;
@@ -931,7 +976,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
 					priv->token,
 					DPNI_QUEUE_TX,
-					tc_id,
+					((channel_id << 8) | tc_id),
 					&cong_notif_cfg);
 		if (ret) {
 			DPAA2_PMD_ERR(
@@ -948,7 +993,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		options = options | DPNI_QUEUE_OPT_USER_CTX;
 		tx_conf_cfg.user_context = (size_t)(dpaa2_q);
 		ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
-			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
			     dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
 		if (ret) {
 			DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -959,7 +1004,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		}
 
 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
-			     DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+			     DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
 			     dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
 		if (ret) {
 			DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -983,6 +1028,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	memset(&cfg, 0, sizeof(struct dpni_queue));
 
 	PMD_INIT_FUNC_TRACE();
+
+	total_nb_rx_desc -= dpaa2_q->nb_desc;
+
 	if (dpaa2_q->cgid != 0xff) {
 		options = DPNI_QUEUE_OPT_CLEAR_CGID;
 		cfg.cgid = dpaa2_q->cgid;
@@ -1057,7 +1105,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
  * Dpaa2 link Interrupt handler
  *
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -1138,7 +1186,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	struct dpni_queue cfg;
 	struct dpni_error_cfg err_cfg;
-	uint16_t qdid;
 	struct dpni_queue_id qid;
 	struct dpaa2_queue *dpaa2_q;
 	int ret, i;
@@ -1148,7 +1195,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 	intr_handle = dpaa2_dev->intr_handle;
 
 	PMD_INIT_FUNC_TRACE();
-
 	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
 	if (ret) {
 		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1159,14 +1205,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 	/* Power up the phy. Needed to make the link go UP */
 	dpaa2_dev_set_link_up(dev);
 
-	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
-			    DPNI_QUEUE_TX, &qdid);
-	if (ret) {
-		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
-		return ret;
-	}
-	priv->qdid = qdid;
-
 	for (i = 0; i < data->nb_rx_queues; i++) {
 		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
 		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -1251,7 +1289,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
 	int ret;
 	struct rte_eth_link link;
-	struct rte_intr_handle *intr_handle = dev->intr_handle;
+	struct rte_device *rdev = dev->device;
+	struct rte_intr_handle *intr_handle;
+	struct rte_dpaa2_device *dpaa2_dev;
+
+	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+	intr_handle = dpaa2_dev->intr_handle;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1961,7 +2004,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
 	}
 
 	/*changing tx burst function to avoid any more enqueues */
-	dev->tx_pkt_burst = dummy_dev_tx;
+	dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
 
 	/* Loop while dpni_disable() attempts to drain the egress FQs
 	 * and confirm them back to us.
@@ -2235,7 +2278,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 	ocfg.oa = 1;
 	/* Late arrival window size disabled */
 	ocfg.olws = 0;
-	/* ORL resource exhaustaion advance NESN disabled */
+	/* ORL resource exhaustion advance NESN disabled */
 	ocfg.oeane = 0;
 	/* Loose ordering enabled */
 	ocfg.oloe = 1;
@@ -2583,6 +2626,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
+	if (eth_dev->data->dev_conf.lpbk_mode)
+		dpaa2_dev_recycle_deconfig(eth_dev);
+
 	/* Clean the device first */
 	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
 	if (ret) {
@@ -2600,9 +2646,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	priv->num_rx_tc = attr.num_rx_tcs;
+	priv->num_tx_tc = attr.num_tx_tcs;
 	priv->qos_entries = attr.qos_entries;
 	priv->fs_entries = attr.fs_entries;
 	priv->dist_queues = attr.num_queues;
+	priv->num_channels = attr.num_channels;
+	priv->channel_inuse = 0;
+	rte_spinlock_init(&priv->lpbk_qp_lock);
 
 	/* only if the custom CG is enabled */
 	if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2616,8 +2666,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	for (i = 0; i < attr.num_rx_tcs; i++)
 		priv->nb_rx_queues += attr.num_queues;
 
-	/* Using number of TX queues as number of TX TCs */
-	priv->nb_tx_queues = attr.num_tx_tcs;
+	priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
 
 	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
 			priv->num_rx_tc, priv->nb_rx_queues,
@@ -2719,13 +2768,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	}
 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
-	/*Init fields w.r.t. classficaition*/
+	/* Init fields w.r.t. classification */
 	memset(&priv->extract.qos_key_extract, 0,
 		sizeof(struct dpaa2_key_extract));
 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
 	if (!priv->extract.qos_extract_param) {
 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
-			    " classificaiton ", ret);
+			    " classification ", ret);
 		goto init_err;
 	}
 	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
@@ -2743,7 +2792,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 		priv->extract.tc_extract_param[i] =
 			(size_t)rte_malloc(NULL, 256, 64);
 		if (!priv->extract.tc_extract_param[i]) {
-			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
+			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
				ret);
 			goto init_err;
 		}
@@ -2788,7 +2837,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 			return ret;
 		}
 	}
-	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+	RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
+		eth_dev->data->name, dpaa2_dev->ep_name);
+
 	return 0;
 init_err:
 	dpaa2_dev_close(eth_dev);
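
The pattern running through the Tx hunks above is the new queue mapping: dpaa2_dev_init() now exposes attr.num_tx_tcs * attr.num_channels Tx queues, dpaa2_dev_tx_queue_setup() decomposes each queue index into a traffic class and a channel, and every dpni_set_queue()/dpni_get_queue()/dpni_set_congestion_notification() call packs the pair as ((channel_id << 8) | tc_id), channel in the high byte. Below is a minimal standalone sketch of that mapping, not part of the patch; the sample counts (4 TCs, 2 channels) are hypothetical placeholders for the values the driver reads from dpni_get_attributes().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical DPNI attributes; the driver reads the real ones
	 * via dpni_get_attributes() (attr.num_tx_tcs, attr.num_channels).
	 */
	uint16_t num_tx_tc = 4;
	uint16_t num_channels = 2;
	/* As in dpaa2_dev_init(): one Tx queue per (channel, TC) pair. */
	uint16_t nb_tx_queues = num_tx_tc * num_channels;
	uint16_t q;

	for (q = 0; q < nb_tx_queues; q++) {
		/* Same decomposition as dpaa2_dev_tx_queue_setup(). */
		uint32_t tc_id = q % num_tx_tc;
		uint16_t channel_id = (uint8_t)(q / num_tx_tc) % num_channels;
		/* Channel in the high byte, TC in the low byte: this is
		 * the queue index value handed to the dpni_* MC calls.
		 */
		uint16_t mc_queue_idx = (uint16_t)((channel_id << 8) | tc_id);

		printf("txq %2u -> channel %u, tc %u, mc queue index 0x%04x\n",
		       (unsigned)q, (unsigned)channel_id, (unsigned)tc_id,
		       (unsigned)mc_queue_idx);
	}
	return 0;
}

With the sample counts, queues 0-3 land on channel 0, TCs 0-3, and queues 4-7 on channel 1, TCs 0-3: consecutive queue indices cycle through the TCs of one channel before moving to the next, which is why the modulo/divide pair is used rather than the old identity mapping tc_id = tx_queue_id.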