X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_rxtx.c;h=6201de46063f3943f9d4224c16d5bca2835d119f;hb=ea2780632f6563f100d0b324b6d027b3b51d2b78;hp=630f8c73c711f7364b6294a26f7f37f9fdaccd12;hpb=d527f5d9bb3a91e70345e1d80b2bf52b69af4617;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 630f8c73c7..6201de4606 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -42,10 +42,12 @@ static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
 } while (0)
 
 static inline void __rte_hot
-dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
+dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
+		       void *hw_annot_addr)
 {
-	struct dpaa2_annot_hdr *annotation;
 	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
+	struct dpaa2_annot_hdr *annotation =
+			(struct dpaa2_annot_hdr *)hw_annot_addr;
 
 	m->packet_type = RTE_PTYPE_UNKNOWN;
 	switch (frc) {
@@ -101,17 +103,12 @@ dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
 				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
 		break;
 	default:
-		m->packet_type = dpaa2_dev_rx_parse_slow(m,
-		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
-		   + DPAA2_FD_PTA_SIZE));
+		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
 	}
 	m->hash.rss = fd->simple.flc_hi;
 	m->ol_flags |= PKT_RX_RSS_HASH;
 
-	if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) {
-		annotation = (struct dpaa2_annot_hdr *)
-			((size_t)DPAA2_IOVA_TO_VADDR(
-			DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE);
+	if (dpaa2_enable_ts[m->port]) {
 		m->timestamp = annotation->word2;
 		m->ol_flags |= PKT_RX_TIMESTAMP;
 		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
@@ -266,9 +263,11 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
 	struct qbman_sge *sgt, *sge;
 	size_t sg_addr, fd_addr;
 	int i = 0;
+	void *hw_annot_addr;
 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
 
 	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
 
 	/* Get Scatter gather table address */
 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
@@ -289,11 +288,10 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
 	first_seg->next = NULL;
 	first_seg->port = port_id;
 	if (dpaa2_svr_family == SVR_LX2160A)
-		dpaa2_dev_rx_parse_new(first_seg, fd);
+		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
 	else
-		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
-			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
-			 + DPAA2_FD_PTA_SIZE));
+		first_seg->packet_type =
+			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
 
 	rte_mbuf_refcnt_set(first_seg, 1);
 	cur_seg = first_seg;
@@ -324,8 +322,9 @@ static inline struct rte_mbuf *__rte_hot
 eth_fd_to_mbuf(const struct qbman_fd *fd,
 	       int port_id)
 {
-	void *iova_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(iova_addr,
+	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
+	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
 	/* need to repopulated some of the fields,
@@ -347,10 +346,9 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
 	 */
 
 	if (dpaa2_svr_family == SVR_LX2160A)
-		dpaa2_dev_rx_parse_new(mbuf, fd);
+		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
 	else
-		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
-			(void *)((size_t)iova_addr + DPAA2_FD_PTA_SIZE));
+		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
 
 	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
 		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
@@ -712,7 +710,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
 
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	ev->mbuf->seqn = dqrr_index + 1;
+	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
@@ -738,9 +736,9 @@ dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
 
 	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
 
-	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
-	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
-	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
+	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
+	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
+	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
 
 	qbman_swp_dqrr_consume(swp, dq);
 }
@@ -1065,14 +1063,14 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			dpaa2_eqcr_size : nb_pkts;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
-			if ((*bufs)->seqn) {
-				uint8_t dqrr_index = (*bufs)->seqn - 1;
+			if (*dpaa2_seqn(*bufs)) {
+				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
 
 				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
 						dqrr_index;
 				DPAA2_PER_LCORE_DQRR_SIZE--;
 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
+				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
@@ -1232,10 +1230,10 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
 
 	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
 
-	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
-		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
+	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
 			DPAA2_EQCR_OPRID_SHIFT;
-		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
+		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
 			DPAA2_EQCR_SEQNUM_SHIFT;
 
 		if (!priv->en_loose_ordered) {
@@ -1257,12 +1255,12 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
 		}
 	} else {
-		dq_idx = m->seqn - 1;
+		dq_idx = *dpaa2_seqn(m) - 1;
 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
 		DPAA2_PER_LCORE_DQRR_SIZE--;
 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
 	}
-	m->seqn = DPAA2_INVALID_MBUF_SEQN;
+	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
 }
 
 /* Callback to handle sending ordered packets through WRIOP based interface */
@@ -1316,7 +1314,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			dpaa2_eqcr_size : nb_pkts;
 
 		if (!priv->en_loose_ordered) {
-			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
 				num_free_eq_desc = dpaa2_free_eq_descriptors();
 				if (num_free_eq_desc < frames_to_send)
 					frames_to_send = num_free_eq_desc;
@@ -1327,7 +1325,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			/*Prepare enqueue descriptor*/
 			qbman_eq_desc_clear(&eqdesc[loop]);
 
-			if ((*bufs)->seqn) {
+			if (*dpaa2_seqn(*bufs)) {
 				/* Use only queue 0 for Tx in case of atomic/
 				 * ordered packets as packets can get unordered
 				 * when being tranmitted out from the interface
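
Note on the dpaa2_seqn() accessor used throughout the hunks above: it replaces the
removed mbuf "seqn" member with a dynamic mbuf field (rte_mbuf_dyn.h). The patch
itself does not show the accessor's definition; the sketch below is only an
illustration of the usual register-and-access pattern, and the helper name
dpaa2_register_seqn_field(), the offset variable, the field-name string and the
uint32_t width are assumptions, not taken from this change.

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

typedef uint32_t dpaa2_seqn_t;		/* assumed width of the sequence field */
int dpaa2_seqn_dynfield_offset = -1;	/* filled in once at registration time */

/* Datapath accessor used in place of the old m->seqn member. */
#define dpaa2_seqn(m) \
	((dpaa2_seqn_t *)RTE_MBUF_DYNFIELD((m), dpaa2_seqn_dynfield_offset, \
		dpaa2_seqn_t *))

/* One-time registration, e.g. from the device probe/configure path
 * (illustrative helper, not part of this patch).
 */
static int
dpaa2_register_seqn_field(void)
{
	static const struct rte_mbuf_dynfield seqn_desc = {
		.name = "dpaa2_seqn_dynfield",	/* illustrative field name */
		.size = sizeof(dpaa2_seqn_t),
		.align = __alignof__(dpaa2_seqn_t),
	};

	dpaa2_seqn_dynfield_offset = rte_mbuf_dynfield_register(&seqn_desc);
	return dpaa2_seqn_dynfield_offset < 0 ? -1 : 0;
}

With a registration along these lines in place, statements in the hunks above such
as *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN read and write the per-mbuf sequence
number through the registered offset instead of a dedicated struct rte_mbuf field.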