X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa%2Fdpaa_ethdev.c;h=27d670f843d2cc197e63c95b09f6a356a9c6403e;hb=7ab475a0299ba379e1ac6efa3b29dbce2656289a;hp=d643514de6efb5a2777dccd2a5b4ebc34f683731;hpb=df96fd0d73955bdc7ca3909e772ff2ad903249c6;p=dpdk.git

diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index d643514de6..27d670f843 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -49,6 +49,9 @@
 #include <process.h>
 #include <fmlib/fm_ext.h>
 
+#define CHECK_INTERVAL         100  /* 100ms */
+#define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
+
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
 		DEV_RX_OFFLOAD_JUMBO_FRAME |
@@ -483,9 +486,6 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
 	if (dpaa_intf->cgr_rx) {
 		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
 			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
-					 dpaa_intf->nb_rx_queues);
 	}
 
 	rte_free(dpaa_intf->cgr_rx);
@@ -494,9 +494,6 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
 	if (dpaa_intf->cgr_tx) {
 		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
 			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
-
-		qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
-					 MAX_DPAA_CORES);
 		rte_free(dpaa_intf->cgr_tx);
 		dpaa_intf->cgr_tx = NULL;
 	}
@@ -535,9 +532,11 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
 
 	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
 		       svr_ver, fman_ip_rev);
-	ret += 1; /* add the size of '\0' */
+	if (ret < 0)
+		return -EINVAL;
 
-	if (fw_size < (uint32_t)ret)
+	ret += 1; /* add the size of '\0' */
+	if (fw_size < (size_t)ret)
 		return ret;
 	else
 		return 0;
@@ -669,23 +668,30 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
-				int wait_to_complete __rte_unused)
+				int wait_to_complete)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct rte_eth_link *link = &dev->data->dev_link;
 	struct fman_if *fif = dev->process_private;
 	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
 	int ret, ioctl_version;
+	uint8_t count;
 
 	PMD_INIT_FUNC_TRACE();
 
 	ioctl_version = dpaa_get_ioctl_version_number();
 
-
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
-		ret = dpaa_get_link_status(__fif->node_name, link);
-		if (ret)
-			return ret;
+		for (count = 0; count <= MAX_REPEAT_TIME; count++) {
+			ret = dpaa_get_link_status(__fif->node_name, link);
+			if (ret)
+				return ret;
+			if (link->link_status == ETH_LINK_DOWN &&
+			    wait_to_complete)
+				rte_delay_ms(CHECK_INTERVAL);
+			else
+				break;
+		}
 	} else {
 		link->link_status = dpaa_intf->valid;
 	}
@@ -965,6 +971,12 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		}
 	}
 
+	if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+	    dpaa_intf->bp_info->mp != mp) {
+		DPAA_PMD_WARN("Multiple pools on same interface not supported");
+		return -EINVAL;
+	}
+
 	/* Max packet can fit in single buffer */
 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
 		;
@@ -1511,12 +1523,19 @@ dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
 	struct qman_fq *rxq;
+	int ret;
 
 	rxq = dev->data->rx_queues[queue_id];
 
 	qinfo->mp = dpaa_intf->bp_info->mp;
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_desc;
+
+	/* Report the HW Rx buffer length to user */
+	ret = fman_if_get_maxfrm(dev->process_private);
+	if (ret > 0)
+		qinfo->rx_buf_size = ret;
+
 	qinfo->conf.rx_free_thresh = 1;
 	qinfo->conf.rx_drop_en = 1;
 	qinfo->conf.rx_deferred_start = 0;
@@ -2292,4 +2311,4 @@ static struct rte_dpaa_driver rte_dpaa_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
-RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);