From: Nipun Gupta
Date: Mon, 9 Apr 2018 10:22:51 +0000 (+0530)
Subject: bus/fslmc: configure separate portal for Ethernet Rx
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=b3ec974c344c0528f34e475584ad3ea0f219fa85;p=dpdk.git

bus/fslmc: configure separate portal for Ethernet Rx

In the Ethernet receive path we add a new pull request (prefetch) but
do not fetch the results of that pull request until the next dequeue
operation. This keeps the portal in busy mode.

This patch bifurcates the portals so that a separate portal is used to
receive Ethernet packets, while all other devices use a common portal.

Signed-off-by: Nipun Gupta
---

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 881dd5f1d8..a741626684 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -350,7 +350,7 @@ dpaa2_affine_qbman_swp(void)
 }
 
 int
-dpaa2_affine_qbman_swp_sec(void)
+dpaa2_affine_qbman_ethrx_swp(void)
 {
         unsigned int lcore_id = rte_lcore_id();
         uint64_t tid = syscall(SYS_gettid);
@@ -361,35 +361,36 @@ dpaa2_affine_qbman_swp_sec(void)
         else if (lcore_id >= RTE_MAX_LCORE)
                 return -1;
 
-        if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
+        if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
                 DPAA2_BUS_DP_INFO(
                         "DPAA Portal=%p (%d) is being shared between thread"
                         " %" PRIu64 " and current %" PRIu64 "\n",
-                        dpaa2_io_portal[lcore_id].sec_dpio_dev,
-                        dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
+                        dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+                        dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
                         dpaa2_io_portal[lcore_id].sec_tid,
                         tid);
-                RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
-                        = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+                RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+                        = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
                 rte_atomic16_inc(&dpaa2_io_portal
-                                 [lcore_id].sec_dpio_dev->ref_count);
+                                 [lcore_id].ethrx_dpio_dev->ref_count);
                 dpaa2_io_portal[lcore_id].sec_tid = tid;
 
                 DPAA2_BUS_DP_DEBUG(
                         "Old Portal=%p (%d) affined thread"
                         " - %" PRIu64 "\n",
-                        dpaa2_io_portal[lcore_id].sec_dpio_dev,
-                        dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
+                        dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+                        dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
                         tid);
                 return 0;
         }
 
         /* Populate the dpaa2_io_portal structure */
-        dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id);
+        dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
+                dpaa2_get_qbman_swp(lcore_id);
 
-        if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
-                RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
-                        = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+        if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+                RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+                        = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
                 dpaa2_io_portal[lcore_id].sec_tid = tid;
                 return 0;
         } else {
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index c0bd878243..d593eea74e 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -13,7 +13,7 @@
 
 struct dpaa2_io_portal_t {
         struct dpaa2_dpio_dev *dpio_dev;
-        struct dpaa2_dpio_dev *sec_dpio_dev;
+        struct dpaa2_dpio_dev *ethrx_dpio_dev;
         uint64_t net_tid;
         uint64_t sec_tid;
         void *eventdev;
@@ -25,8 +25,8 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
 #define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
 #define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
 
-#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
-#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal
 
 /* Variable to store DPAA2 platform type */
 extern uint32_t dpaa2_svr_family;
@@ -39,7 +39,7 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
 int dpaa2_affine_qbman_swp(void);
 
 /* Affine additional DPIO portal to current crypto processing thread */
-int dpaa2_affine_qbman_swp_sec(void);
+int dpaa2_affine_qbman_ethrx_swp(void);
 
 /* allocate memory for FQ - dq storage */
 int
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index b7db074142..f519651ee9 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -2,7 +2,6 @@ DPDK_17.05 {
         global:
 
         dpaa2_affine_qbman_swp;
-        dpaa2_affine_qbman_swp_sec;
         dpaa2_alloc_dpbp_dev;
         dpaa2_alloc_dq_storage;
         dpaa2_free_dpbp_dev;
@@ -101,3 +100,10 @@ DPDK_18.02 {
         rte_fslmc_get_device_count;
 
 } DPDK_17.11;
+
+DPDK_18.05 {
+        global:
+
+        dpaa2_affine_qbman_ethrx_swp;
+
+} DPDK_18.02;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 784b96db88..23012e35ad 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1159,14 +1159,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
         qbman_eq_desc_set_response(&eqdesc, 0, 0);
         qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
 
-        if (!DPAA2_PER_LCORE_SEC_DPIO) {
-                ret = dpaa2_affine_qbman_swp_sec();
+        if (!DPAA2_PER_LCORE_DPIO) {
+                ret = dpaa2_affine_qbman_swp();
                 if (ret) {
                         DPAA2_SEC_ERR("Failure in affining portal");
                         return 0;
                 }
         }
-        swp = DPAA2_PER_LCORE_SEC_PORTAL;
+        swp = DPAA2_PER_LCORE_PORTAL;
 
         while (nb_ops) {
                 frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
@@ -1307,14 +1307,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
         const struct qbman_fd *fd;
         struct qbman_pull_desc pulldesc;
 
-        if (!DPAA2_PER_LCORE_SEC_DPIO) {
-                ret = dpaa2_affine_qbman_swp_sec();
+        if (!DPAA2_PER_LCORE_DPIO) {
+                ret = dpaa2_affine_qbman_swp();
                 if (ret) {
                         DPAA2_SEC_ERR("Failure in affining portal");
                         return 0;
                 }
         }
-        swp = DPAA2_PER_LCORE_SEC_PORTAL;
+        swp = DPAA2_PER_LCORE_PORTAL;
 
         dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
         qbman_pull_desc_clear(&pulldesc);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 532de940c6..d1cfe95dcf 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -481,14 +481,15 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
         struct rte_eth_dev *dev = dpaa2_q->dev;
 
-        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-                ret = dpaa2_affine_qbman_swp();
+        if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+                ret = dpaa2_affine_qbman_ethrx_swp();
                 if (ret) {
                         DPAA2_PMD_ERR("Failure in affining portal");
                         return 0;
                 }
         }
-        swp = DPAA2_PER_LCORE_PORTAL;
+        swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+
         if (unlikely(!q_storage->active_dqs)) {
                 q_storage->toggle = 0;
                 dq_storage = q_storage->dq_storage[q_storage->toggle];
@@ -500,11 +501,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                 qbman_pull_desc_set_fq(&pulldesc, fqid);
                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                         (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-                if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+                if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
                         while (!qbman_check_command_complete(
-                               get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+                               get_swp_active_dqs(
+                               DPAA2_PER_LCORE_ETHRX_DPIO->index)))
                                 ;
-                        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+                        clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
                 }
                 while (1) {
                         if (qbman_swp_pull(swp, &pulldesc)) {
@@ -516,8 +518,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                 break;
                         }
                         q_storage->active_dqs = dq_storage;
-                        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-                        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+                        q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+                        set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+                                           dq_storage);
                 }
 
                 dq_storage = q_storage->active_dqs;
@@ -583,11 +586,11 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                 num_rx++;
         } while (pending);
 
-        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+        if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
                 while (!qbman_check_command_complete(
-                       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+                       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
                         ;
-                clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+                clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
         }
         /* issue a volatile dequeue command for next pull */
         while (1) {
@@ -599,8 +602,8 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                 break;
         }
         q_storage->active_dqs = dq_storage1;
-        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+        q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+        set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
 
         dpaa2_q->rx_pkts += num_rx;
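
For readers of the archive, the split introduced above boils down to the
following per-lcore portal selection pattern. This is an illustrative
sketch, not part of the patch: the helper names my_eth_rx_portal() and
my_common_portal() are hypothetical, and it assumes the fslmc internal
headers patched above are on the include path. The macros and affine
functions it calls are the ones added or kept by the hunks above.

/*
 * Sketch only: the Ethernet Rx path affines and uses the dedicated
 * ethrx DPIO portal, so the volatile dequeue (prefetch) it leaves
 * outstanding between bursts does not keep the common portal busy
 * for dpaa2_sec and other users.
 */
#include <rte_branch_prediction.h>

#include "dpaa2_hw_dpio.h"

static inline struct qbman_swp *
my_eth_rx_portal(void)
{
        /* Ethernet Rx: lazily affine the dedicated ethrx portal per lcore */
        if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
                if (dpaa2_affine_qbman_ethrx_swp())
                        return NULL;
        }
        return DPAA2_PER_LCORE_ETHRX_PORTAL;
}

static inline struct qbman_swp *
my_common_portal(void)
{
        /* Crypto, eventdev and other paths keep the common portal */
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                if (dpaa2_affine_qbman_swp())
                        return NULL;
        }
        return DPAA2_PER_LCORE_PORTAL;
}

With this split, dpaa2_dev_prefetch_rx() can leave its pull request
pending on the ethrx portal across calls, while dpaa2_sec_enqueue_burst()
and dpaa2_sec_dequeue_burst() issue their own commands on the common
portal without waiting for that prefetch to complete.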