X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_rxtx.c;h=ff9a696e0fb78dad7911647755227469bc96ef5f;hb=2f3b88fbabc580ba9501be900ba9e7f95508ac09;hp=c1ea33af4bce6b49176e43c2400162029f45c04b;hpb=a5fc38d422a7211a3153f49e7496fadaee12ecd0;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c1ea33af4b..ff9a696e0f 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -136,7 +136,7 @@ static inline struct rte_mbuf *__attribute__((hot))
 eth_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
-			DPAA2_GET_FD_ADDR(fd),
+			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
 	/* need to repopulated some of the fields,
@@ -151,10 +151,11 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 	/* Parse the packet */
 	/* parse results are after the private - sw annotation area */
 	mbuf->packet_type = dpaa2_dev_rx_parse(
-			(uint64_t)(DPAA2_GET_FD_ADDR(fd))
+			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
 			 + DPAA2_FD_PTA_SIZE);
 
-	dpaa2_dev_rx_offload((uint64_t)(DPAA2_GET_FD_ADDR(fd)) +
+	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+			     DPAA2_GET_FD_ADDR(fd)) +
 			     DPAA2_FD_PTA_SIZE, mbuf);
 
 	mbuf->next = NULL;
@@ -170,14 +171,14 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 	return mbuf;
 }
 
-static void __attribute__ ((noinline)) __attribute__((hot))
+static void __rte_noinline __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
 	/*Resetting the buffer pool id and offset field*/
 	fd->simple.bpid_offset = 0;
 
-	DPAA2_SET_FD_ADDR(fd, (mbuf->buf_addr));
+	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
 	DPAA2_SET_FD_BPID(fd, bpid);
 	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
@@ -191,6 +192,55 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
 }
+
+static inline int __attribute__((hot))
+eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
+		    struct qbman_fd *fd, uint16_t bpid)
+{
+	struct rte_mbuf *m;
+	void *mb = NULL;
+
+	if (rte_dpaa2_mbuf_alloc_bulk(
+		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
+		PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
+		rte_pktmbuf_free(mbuf);
+		return -1;
+	}
+	m = (struct rte_mbuf *)mb;
+	memcpy((char *)m->buf_addr + mbuf->data_off,
+		(void *)((char *)mbuf->buf_addr + mbuf->data_off),
+		mbuf->pkt_len);
+
+	/* Copy required fields */
+	m->data_off = mbuf->data_off;
+	m->ol_flags = mbuf->ol_flags;
+	m->packet_type = mbuf->packet_type;
+	m->tx_offload = mbuf->tx_offload;
+
+	/*Resetting the buffer pool id and offset field*/
+	fd->simple.bpid_offset = 0;
+
+	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
+	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
+	DPAA2_SET_FD_BPID(fd, bpid);
+	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
+	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+
+	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
+		   (void *)mbuf, mbuf->buf_addr);
+
+	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
+		   DPAA2_GET_FD_ADDR(fd),
+		   DPAA2_GET_FD_BPID(fd),
+		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+		   DPAA2_GET_FD_OFFSET(fd),
+		   DPAA2_GET_FD_LEN(fd));
+	/*free the original packet */
+	rte_pktmbuf_free(mbuf);
+
+	return 0;
+}
+
 
 uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -222,7 +272,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		/* todo optimization - we can have dq_storage_phys available*/
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(dma_addr_t)(dq_storage), 1);
+			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 
 		/*Issue a volatile dequeue command. */
 		while (1) {
@@ -263,7 +313,8 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 
 		fd = qbman_result_DQ_fd(dq_storage);
-		mbuf = (struct rte_mbuf *)(DPAA2_GET_FD_ADDR(fd)
+		mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
+			DPAA2_GET_FD_ADDR(fd)
 		   - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 		/* Prefeth mbuf */
 		rte_prefetch0(mbuf);
@@ -331,8 +382,29 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
 			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
 			mp = (*bufs)->pool;
-			bpid = mempool_to_bpid(mp);
-			eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+			/* Not a hw_pkt pool allocated frame */
+			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+				PMD_TX_LOG(ERR, "non hw offload bufffer ");
+				/* alloc should be from the default buffer pool
+				 * attached to this interface
+				 */
+				if (priv->bp_list) {
+					bpid = priv->bp_list->buf_pool.bpid;
+				} else {
+					PMD_TX_LOG(ERR, "errr: why no bpool"
+						   " attached");
+					num_tx = 0;
+					goto skip_tx;
+				}
+				if (eth_copy_mbuf_to_fd(*bufs,
+						&fd_arr[loop], bpid)) {
+					bufs++;
+					continue;
+				}
+			} else {
+				bpid = mempool_to_bpid(mp);
+				eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+			}
 			bufs++;
 		}
 		loop = 0;
@@ -345,5 +417,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		dpaa2_q->tx_pkts += frames_to_send;
 		nb_pkts -= frames_to_send;
 	}
+skip_tx:
 	return num_tx;
 }
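
The Tx hunk above adds a slow path for mbufs that were not allocated from a DPAA2 hardware-backed mempool: eth_copy_mbuf_to_fd() takes a buffer from the interface's own pool, copies the packet data and the relevant metadata into it, builds the frame descriptor from the copy, and frees the original mbuf. The sketch below is only a rough, hypothetical illustration of that fallback idea expressed with generic rte_mbuf/rte_mempool calls; copy_to_dev_pool() and its single-segment assumption are illustrative and not part of the driver or of this patch.

/* Illustrative sketch only: return an mbuf that is guaranteed to come from
 * dev_pool, copying the packet if it was allocated elsewhere.
 * Hypothetical helper, single-segment packets only.
 */
#include <string.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mbuf *
copy_to_dev_pool(struct rte_mbuf *m, struct rte_mempool *dev_pool)
{
	struct rte_mbuf *copy;

	/* Fast path: the buffer already belongs to the pool the hardware
	 * knows about, nothing to do.
	 */
	if (m->pool == dev_pool)
		return m;

	/* Slow path: allocate from the device pool and copy the data over. */
	copy = rte_pktmbuf_alloc(dev_pool);
	if (copy == NULL)
		return NULL;
	if (rte_pktmbuf_append(copy, m->data_len) == NULL) {
		rte_pktmbuf_free(copy);
		return NULL;
	}
	memcpy(rte_pktmbuf_mtod(copy, void *),
	       rte_pktmbuf_mtod(m, const void *), m->data_len);

	/* Carry over the metadata the Tx path cares about. */
	copy->ol_flags = m->ol_flags;
	copy->packet_type = m->packet_type;
	copy->tx_offload = m->tx_offload;

	/* The original packet is consumed, as in the patch's fallback. */
	rte_pktmbuf_free(m);
	return copy;
}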