diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index d1cfe95dcf..ef109a621a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -317,12 +317,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	temp = rte_pktmbuf_alloc(mbuf->pool);
 	if (temp == NULL) {
 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +383,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		if (rte_vlan_insert(&mbuf)) {
-			rte_pktmbuf_free(mbuf);
-			return;
-		}
-	}
-
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +415,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	struct rte_mbuf *m;
 	void *mb = NULL;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -466,6 +447,12 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	return 0;
 }
 
+/* This function assumes that the caller keeps the same value of nb_pkts
+ * across calls per queue; if that is not the case, it is better to use the
+ * non-prefetch version of the Rx call.
+ * It will return the packets requested in the previous call, without
+ * honoring the current nb_pkts or bufs space.
+ */
 uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -473,7 +460,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
-	int ret, num_rx = 0;
+	int ret, num_rx = 0, pull_size;
 	uint8_t pending, status;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd, *next_fd;
@@ -489,18 +476,18 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 	}
 	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
-
+	pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+			DPAA2_DQRR_RING_SIZE : nb_pkts;
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
 		dq_storage = q_storage->dq_storage[q_storage->toggle];
-		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
-					       DPAA2_DQRR_RING_SIZE : nb_pkts;
+		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
 					      q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -533,10 +520,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	q_storage->toggle ^= 1;
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
-	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -579,7 +566,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		bufs[num_rx] = eth_fd_to_mbuf(fd);
 		bufs[num_rx]->port = dev->data->port_id;
 
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
@@ -737,8 +724,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					priv->bp_list->dpaa2_ops_index &&
 					(*bufs)->nb_segs == 1 &&
 					rte_mbuf_refcnt_read((*bufs)) == 1)) {
-				if (unlikely((*bufs)->ol_flags
-					& PKT_TX_VLAN_PKT)) {
+				if (unlikely(((*bufs)->ol_flags
+					& PKT_TX_VLAN_PKT) ||
+					(dev->data->dev_conf.txmode.offloads
+					& DEV_TX_OFFLOAD_VLAN_INSERT))) {
 					ret = rte_vlan_insert(bufs);
 					if (ret)
 						goto send_n_return;
@@ -758,6 +747,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				goto send_n_return;
 			}
 
+			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+				(dev->data->dev_conf.txmode.offloads
+				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+				int ret = rte_vlan_insert(bufs);
+				if (ret)
+					goto send_n_return;
+			}
 			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
 				/* alloc should be from the default buffer pool
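Note on the VLAN changes above: the patch drops the per-mbuf rte_vlan_insert() calls from the FD-conversion helpers and performs software VLAN insertion once in dpaa2_dev_tx(), triggered either by the per-packet PKT_TX_VLAN_PKT flag or by the port-level DEV_TX_OFFLOAD_VLAN_INSERT offload; on the Rx side it replaces the legacy rxmode.hw_vlan_strip bit-field with a check of rxmode.offloads against DEV_RX_OFFLOAD_VLAN_STRIP. Below is a minimal sketch of how an application might request these offloads at configure time; configure_port_with_vlan_offloads() is a hypothetical helper, and only the rte_eth_* calls and the DEV_*_OFFLOAD_* macros (pre-20.11 names, matching this patch) come from DPDK itself.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper (sketch): request port-level VLAN strip on Rx and
 * VLAN insert on Tx via the offload flags this patch switches to. */
static int
configure_port_with_vlan_offloads(uint16_t port_id,
				  uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Only request what the PMD reports as supported. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
		conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

With DEV_TX_OFFLOAD_VLAN_INSERT enabled, this driver calls rte_vlan_insert() for each transmitted packet, which builds the tag from mbuf->vlan_tci, so the application still has to fill vlan_tci before handing the mbuf to Tx.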
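Note on the new comment before dpaa2_dev_prefetch_rx(): each call issues the volatile pull for the next burst, now clamped to pull_size (at most DPAA2_DQRR_RING_SIZE frames), so the packets a call returns were sized by the previous call's nb_pkts. A sketch of a polling loop that respects this by keeping the burst size constant per queue follows; poll_queue() and BURST_SIZE are illustrative names, and rte_eth_rx_burst() is assumed to resolve to this prefetch Rx handler on the dpaa2 PMD.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* keep constant per queue, as the comment requires */

/* Hypothetical polling loop (sketch): using the same BURST_SIZE on every
 * call lets the pull prefetched by the previous call match what the caller
 * can accept now. */
static void
poll_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb, i;

	for (;;) {
		nb = rte_eth_rx_burst(port_id, queue_id, bufs, BURST_SIZE);
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(bufs[i]);	/* application work goes here */
	}
}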