X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_rxtx.c;h=7cfa73a8c7ca0e98654ff0808a3c349ce784d9f0;hb=81dba2b2ff61ae1b2f5b45d6a93ccd82bf0cbfdb;hp=3db91d8b15220c72919ebf6b91235f2035b4e5ad;hpb=ba9219fe42e19fa93a51474bbb7a1fd656dce9ac;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 3db91d8b15..7cfa73a8c7 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -350,7 +350,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
-		rte_pktmbuf_free(mbuf);
 		return -1;
 	}
 	m = (struct rte_mbuf *)mb;
@@ -382,8 +381,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
 		DPAA2_GET_FD_OFFSET(fd),
 		DPAA2_GET_FD_LEN(fd));
-	/*free the original packet */
-	rte_pktmbuf_free(mbuf);
 
 	return 0;
 }
@@ -422,7 +419,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
 			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-			while (!qbman_check_command_complete(swp,
+			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
@@ -445,7 +442,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	 * Also seems like the SWP is shared between the Ethernet Driver
 	 * and the SEC driver.
 	 */
-	while (!qbman_check_command_complete(swp, dq_storage))
+	while (!qbman_check_command_complete(dq_storage))
 		;
 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
 		clear_swp_active_dqs(q_storage->active_dpio_id);
@@ -453,7 +450,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		/* Loop until the dq_storage is updated with
 		 * new token by QBMAN
 		 */
-		while (!qbman_result_has_new_result(swp, dq_storage))
+		while (!qbman_check_new_result(dq_storage))
 			;
 		rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
 		/* Check whether Last Pull command is Expired and
@@ -486,7 +483,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(swp,
+		while (!qbman_check_command_complete(
 		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
 			;
 		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
@@ -524,7 +521,7 @@ uint16_t
 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function to transmit the frames to given device and VQ*/
-	uint32_t loop;
+	uint32_t loop, retry_count;
 	int32_t ret;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	struct rte_mbuf *mi;
@@ -559,8 +556,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	/*Clear the unused FD fields before sending*/
 	while (nb_pkts) {
 		/*Check if the queue is congested*/
-		if (qbman_result_SCN_state_in_mem(dpaa2_q->cscn))
-			goto skip_tx;
+		retry_count = 0;
+		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
+			retry_count++;
+			/* Retry for some time before giving up */
+			if (retry_count > CONG_RETRY_COUNT)
+				goto skip_tx;
+		}
 
 		frames_to_send = (nb_pkts >> 3) ?
 			MAX_TX_RING_SLOTS : nb_pkts;
@@ -575,39 +577,35 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				mp = mi->pool;
 			}
 			/* Not a hw_pkt pool allocated frame */
-			if (!mp) {
+			if (unlikely(!mp || !priv->bp_list)) {
 				PMD_TX_LOG(ERR, "err: no bpool attached");
-				goto skip_tx;
+				goto send_n_return;
 			}
+
 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 				PMD_TX_LOG(ERR, "non hw offload bufffer ");
 				/* alloc should be from the default buffer pool
 				 * attached to this interface
 				 */
-				if (priv->bp_list) {
-					bpid = priv->bp_list->buf_pool.bpid;
-				} else {
-					PMD_TX_LOG(ERR,
-						   "err: no bpool attached");
-					num_tx = 0;
-					goto skip_tx;
-				}
+				bpid = priv->bp_list->buf_pool.bpid;
+
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					PMD_TX_LOG(ERR, "S/G support not added"
 						" for non hw offload buffer");
-					goto skip_tx;
+					goto send_n_return;
 				}
 				if (eth_copy_mbuf_to_fd(*bufs,
 							&fd_arr[loop], bpid)) {
-					bufs++;
-					continue;
+					goto send_n_return;
 				}
+				/* free the original packet */
+				rte_pktmbuf_free(*bufs);
 			} else {
 				bpid = mempool_to_bpid(mp);
 				if (unlikely((*bufs)->nb_segs > 1)) {
 					if (eth_mbuf_to_sg_fd(*bufs,
 							&fd_arr[loop], bpid))
-						goto skip_tx;
+						goto send_n_return;
 				} else {
 					eth_mbuf_to_fd(*bufs,
 						       &fd_arr[loop], bpid);
@@ -617,7 +615,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 		loop = 0;
 		while (loop < frames_to_send) {
-			loop += qbman_swp_send_multiple(swp, &eqdesc,
+			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
 					&fd_arr[loop],
 					frames_to_send - loop);
 		}
@@ -625,6 +623,20 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		dpaa2_q->tx_pkts += frames_to_send;
 		nb_pkts -= frames_to_send;
 	}
+	return num_tx;
+
+send_n_return:
+	/* send any already prepared fd */
+	if (loop) {
+		unsigned int i = 0;
+
+		while (i < loop) {
+			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
+							&fd_arr[i], loop - i);
+		}
+		num_tx += loop;
+		dpaa2_q->tx_pkts += loop;
+	}
 skip_tx:
 	return num_tx;
 }
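
For illustration, below is a standalone sketch of the two Tx-path patterns this diff introduces: polling the congestion state a bounded number of times (CONG_RETRY_COUNT) instead of giving up on the first check, and a send_n_return path that flushes the frame descriptors already prepared when conversion fails mid-batch, so converted frames are counted and sent rather than silently dropped. This is not DPDK code; every name in it (is_congested, hw_enqueue, build_frame, CONG_RETRY, BATCH) is a hypothetical stand-in for the driver's qbman calls.

/*
 * Minimal sketch of bounded congestion retry + partial-batch flush.
 * Hypothetical names throughout; mirrors the control flow of the diff only.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONG_RETRY	8	/* plays the role of CONG_RETRY_COUNT */
#define BATCH		4

static int cong_polls = 2;	/* pretend congestion clears after two polls */

static bool is_congested(void)
{
	return cong_polls-- > 0;
}

/* Hardware may accept fewer frames than offered, hence the caller loops. */
static unsigned int hw_enqueue(const int *fd, unsigned int n)
{
	printf("enqueued %u frame(s) starting with id %d\n", n, fd[0]);
	return n;
}

static bool build_frame(int *fd, unsigned int idx)
{
	*fd = (int)idx;
	return idx != 2;	/* simulate a failure on the third frame */
}

static unsigned int tx_burst(void)
{
	int fd_arr[BATCH];
	unsigned int loop, i, retry_count = 0, num_tx = 0;

	/* Retry for some time before giving up, as the diff does. */
	while (is_congested()) {
		if (++retry_count > CONG_RETRY)
			goto skip_tx;
	}

	for (loop = 0; loop < BATCH; loop++)
		if (!build_frame(&fd_arr[loop], loop))
			goto send_n_return;

	for (i = 0; i < BATCH; )	/* normal path: send the full batch */
		i += hw_enqueue(&fd_arr[i], BATCH - i);
	return BATCH;

send_n_return:
	/* Flush frames prepared before the failure instead of dropping them. */
	for (i = 0; i < loop; )
		i += hw_enqueue(&fd_arr[i], loop - i);
	num_tx += loop;
skip_tx:
	return num_tx;
}

int main(void)
{
	printf("num_tx = %u\n", tx_burst());
	return 0;
}

Run as written, the sketch reports num_tx = 2: congestion clears within the retry budget, frame construction fails on the third frame, and the two already-built frames are still enqueued. The same reasoning explains why the diff moves rte_pktmbuf_free() out of eth_copy_mbuf_to_fd() and into the caller: only the caller knows whether the frame actually made it into fd_arr[] and will be handed to hardware.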