if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
- rte_pktmbuf_free(mbuf);
return -1;
}
m = (struct rte_mbuf *)mb;
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_OFFSET(fd),
DPAA2_GET_FD_LEN(fd));
- /*free the original packet */
- rte_pktmbuf_free(mbuf);
return 0;
}
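/*
 * A minimal sketch of the ownership contract after this change, not the
 * exact driver code: the helper no longer frees the source mbuf on any
 * path, so the caller frees it only once the copy has succeeded. The
 * name copy_to_fd_sketch and the elided copy step are illustrative;
 * rte_dpaa2_mbuf_alloc_bulk() and rte_dpaa2_bpid_info are the real
 * symbols used in the hunk above.
 */
static inline int
copy_to_fd_sketch(struct rte_mbuf *mbuf, uint16_t bpid)
{
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
			rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		/* allocation failed: leave the caller's mbuf untouched */
		return -1;
	}
	/* ... copy data and annotation from mbuf into the new buffer ... */
	RTE_SET_USED(mbuf);
	return 0;	/* success: the caller now frees the original mbuf */
}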
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
- while (!qbman_check_command_complete(swp,
+ while (!qbman_check_command_complete(
get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
;
clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
/* Check if the previous issued command is completed.
 * Also seems like the SWP is shared between the Ethernet Driver
* and the SEC driver.
*/
- while (!qbman_check_command_complete(swp, dq_storage))
+ while (!qbman_check_command_complete(dq_storage))
;
if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
clear_swp_active_dqs(q_storage->active_dpio_id);
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
- while (!qbman_result_has_new_result(swp, dq_storage))
+ while (!qbman_check_new_result(dq_storage))
;
rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
/* Check whether Last Pull command is Expired and
 * setting Condition for Loop termination
 */
}
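/*
 * Note on the API change in these hunks: qbman_check_command_complete()
 * and qbman_check_new_result() now take only the dequeue-result storage;
 * the software-portal handle (swp) is no longer threaded through the
 * busy-wait loops, since the token QBMAN writes into the result memory
 * is enough to detect completion.
 */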
if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
- while (!qbman_check_command_complete(swp,
+ while (!qbman_check_command_complete(
get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
;
clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
/* Function to transmit the frames to the given device and VQ */
- uint32_t loop;
+ uint32_t loop, retry_count;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
struct rte_mbuf *mi;
/* Clear the unused FD fields before sending */
while (nb_pkts) {
/* Check if the queue is congested */
- if (qbman_result_SCN_state_in_mem(dpaa2_q->cscn))
- goto skip_tx;
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto skip_tx;
+ }
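/*
 * Rationale for the change above: rather than dropping the whole burst
 * the moment the CSCN reports congestion, the Tx path now re-polls the
 * notification up to CONG_RETRY_COUNT times (a driver-defined budget),
 * giving hardware a brief window to drain before giving up.
 */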
frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
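/* Assuming MAX_TX_RING_SLOTS is 8 (which the shift implies):
 * nb_pkts >> 3 is non-zero exactly when nb_pkts >= 8, so each
 * iteration sends a full ring's worth and the final short burst
 * sends whatever remains.
 */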
mp = mi->pool;
}
/* Not a hw_pkt pool allocated frame */
- if (!mp) {
+ if (unlikely(!mp || !priv->bp_list)) {
PMD_TX_LOG(ERR, "err: no bpool attached");
- goto skip_tx;
+ goto send_n_return;
}
+
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
PMD_TX_LOG(ERR, "non hw offload buffer");
/* alloc should be from the default buffer pool
* attached to this interface
*/
- if (priv->bp_list) {
- bpid = priv->bp_list->buf_pool.bpid;
- } else {
- PMD_TX_LOG(ERR,
- "err: no bpool attached");
- num_tx = 0;
- goto skip_tx;
- }
+ bpid = priv->bp_list->buf_pool.bpid;
+
if (unlikely((*bufs)->nb_segs > 1)) {
PMD_TX_LOG(ERR, "S/G support not added"
" for non hw offload buffer");
- goto skip_tx;
+ goto send_n_return;
}
if (eth_copy_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid)) {
- bufs++;
- continue;
+ goto send_n_return;
}
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
} else {
bpid = mempool_to_bpid(mp);
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop], bpid))
- goto skip_tx;
+ goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
&fd_arr[loop], bpid);
}
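/*
 * Ownership note tying the two paths above together: the copy path
 * duplicates the packet into a hardware-backed buffer and frees the
 * original, while the direct path hands the mbuf's own buffer to
 * hardware; a failed copy now takes send_n_return instead of silently
 * skipping the frame.
 */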
loop = 0;
while (loop < frames_to_send) {
- loop += qbman_swp_send_multiple(swp, &eqdesc,
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop], frames_to_send - loop);
}
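/*
 * qbman_swp_enqueue_multiple() returns how many of the requested frames
 * the portal actually accepted (the enqueue ring can be momentarily
 * full), which is why this loop, and the matching one in send_n_return
 * below, keep calling it until the prepared burst is drained.
 */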
dpaa2_q->tx_pkts += frames_to_send;
nb_pkts -= frames_to_send;
}
+ return num_tx;
+
+send_n_return:
+ /* send any already prepared fd */
+ if (loop) {
+ unsigned int i = 0;
+
+ while (i < loop) {
+ i += qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[i], loop - i);
+ }
+ num_tx += loop;
+ dpaa2_q->tx_pkts += loop;
+ }
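+ /*
+  * In both exit paths num_tx counts only the frames actually handed
+  * to QBMAN; the shortfall is visible to the caller in the return
+  * value, per the usual Tx burst contract.
+  */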
skip_tx:
return num_tx;
}
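/*
 * Illustrative caller-side handling (not part of the patch) under the
 * standard rte_eth_tx_burst contract that dpaa2_dev_tx now honors on
 * congestion: frames beyond the returned count were not sent and remain
 * owned by the application. port_id, queue_id, pkts and n are assumed
 * to be set up elsewhere.
 */
uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);

while (sent < n)
	rte_pktmbuf_free(pkts[sent++]);	/* or buffer and retry later */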