net/dpaa2: prefetch the annotation in event processing
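
In dpaa2_dev_process_parallel_event() and dpaa2_dev_process_atomic_event(),
prefetch the hardware annotation of the incoming frame as soon as the FD
address is known, and fill the rte_event fields from the queue context
while that load is in flight; eth_fd_to_mbuf(), which actually reads the
annotation, now runs last. The Rx offload checks (VLAN presence, L3/L4
checksum errors) are folded from dpaa2_dev_rx_offload() into
dpaa2_dev_rx_parse() so the annotation is walked once per frame, the
now-redundant bpid_offset resets are dropped from the Tx paths in favour
of DPAA2_SET_ONLY_FD_BPID(), and the atomic event handler is marked hot.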
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 97d0f45..83e0e71 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -9,7 +9,7 @@
 #include <net/if.h>
 
 #include <rte_mbuf.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
@@ -166,15 +166,24 @@ parse_done:
        return pkt_type;
 }
 
-
 static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
 {
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;
 
        PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);
 
+       /* Check offloads first */
+       if (BIT_ISSET_AT_POS(annotation->word3,
+                            L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
+               mbuf->ol_flags |= PKT_RX_VLAN;
+
+       if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+       else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
        /* Return some common types from parse processing */
        switch (annotation->word4) {
        case DPAA2_L3_IPv4:
@@ -201,23 +210,6 @@ dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
        return dpaa2_dev_rx_parse_slow(hw_annot_addr);
 }
 
-static inline void __attribute__((hot))
-dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
-{
-       struct dpaa2_annot_hdr *annotation =
-               (struct dpaa2_annot_hdr *)hw_annot_addr;
-
-       if (BIT_ISSET_AT_POS(annotation->word3,
-                            L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
-               mbuf->ol_flags |= PKT_RX_VLAN;
-
-       if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
-               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-       if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
-               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-}
-
 static inline struct rte_mbuf *__attribute__((hot))
 eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 {
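
For orientation, a condensed sketch of the merged fast-path parser after
this change. The BIT_ISSET_AT_POS() definition and the RTE_PTYPE mapping
shown here are assumptions based on the surrounding dpaa2 annotation
header, not part of this diff:

/* Assumed helper from the dpaa2 annotation header. */
#define BIT_ISSET_AT_POS(a, pos) ((a) & (pos))

static inline uint32_t
rx_parse_sketch(struct rte_mbuf *mbuf, const struct dpaa2_annot_hdr *a)
{
	/* Offload flags and packet type now come from one walk of the
	 * annotation instead of two (parse + offload) as before. */
	if (BIT_ISSET_AT_POS(a->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(a->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(a->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	switch (a->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	/* ... other common types elided ... */
	default:
		return dpaa2_dev_rx_parse_slow((uint64_t)a);
	}
}

The else-if (two independent ifs before the change) presumably reflects
that an L4 checksum is not meaningful once the L3 checksum has already
failed, so the second test is skipped in that case.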
@@ -249,14 +241,11 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
        if (dpaa2_svr_family == SVR_LX2160A)
                dpaa2_dev_rx_parse_frc(first_seg,
                                DPAA2_GET_FD_FRC_PARSE_SUM(fd));
-       else {
-               first_seg->packet_type = dpaa2_dev_rx_parse(
+       else
+               first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
                         (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);
-               dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
-                       DPAA2_GET_FD_ADDR(fd)) +
-                       DPAA2_FD_PTA_SIZE, first_seg);
-       }
+
        rte_mbuf_refcnt_set(first_seg, 1);
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
@@ -308,14 +297,10 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 
        if (dpaa2_svr_family == SVR_LX2160A)
                dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
-       else {
-               mbuf->packet_type = dpaa2_dev_rx_parse(
+       else
+               mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);
-               dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
-                            DPAA2_GET_FD_ADDR(fd)) +
-                            DPAA2_FD_PTA_SIZE, mbuf);
-       }
 
        PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
@@ -335,10 +320,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
        struct qbman_sge *sgt, *sge = NULL;
        int i;
 
-       /* First Prepare FD to be transmited*/
-       /* Resetting the buffer pool id and offset field*/
-       fd->simple.bpid_offset = 0;
-
        if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
                int ret = rte_vlan_insert(&mbuf);
                if (ret)
@@ -353,8 +334,8 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 
        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+       DPAA2_SET_ONLY_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, temp->data_off);
-       DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        /*Set Scatter gather table and Scatter gather entries*/
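
Why DPAA2_SET_ONLY_FD_BPID() is now placed before DPAA2_SET_FD_OFFSET():
both fields share one 32-bit word of the frame descriptor, and only the
bpid write clears it. A sketch of the assumed macro semantics, modeled
on the fslmc portal header (the exact definitions are not part of this
diff):

/* Assumed packing: buffer pool id in the low bits, data offset in
 * bits 16+ of the same 32-bit frame-descriptor word. */
#define DPAA2_SET_ONLY_FD_BPID(fd, bpid) \
	((fd)->simple.bpid_offset = (bpid))	/* '=' clears the word */
#define DPAA2_SET_FD_OFFSET(fd, offset) \
	((fd)->simple.bpid_offset |= (uint32_t)(offset) << 16) /* OR in */

Because the plain assignment already zeroes the word, the standalone
"fd->simple.bpid_offset = 0;" resets removed in this and the following
hunks were redundant; in the contiguous-FD paths, DPAA2_MBUF_TO_CONTIG_FD()
is presumably expected to initialize the word the same way.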
@@ -417,8 +398,6 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
                        return;
                }
        }
-       /*Resetting the buffer pool id and offset field*/
-       fd->simple.bpid_offset = 0;
 
        DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
@@ -474,9 +453,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;
 
-       /*Resetting the buffer pool id and offset field*/
-       fd->simple.bpid_offset = 0;
-
        DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
 
        PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
@@ -700,7 +676,8 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
 {
-       ev->mbuf = eth_fd_to_mbuf(fd);
+       rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+               DPAA2_FD_PTA_SIZE + 16));
 
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
@@ -710,18 +687,22 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
+       ev->mbuf = eth_fd_to_mbuf(fd);
+
        qbman_swp_dqrr_consume(swp, dq);
 }
 
-void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
-                                   const struct qbman_fd *fd,
-                                   const struct qbman_result *dq,
-                                   struct dpaa2_queue *rxq,
-                                   struct rte_event *ev)
+void __attribute__((hot))
+dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+                              const struct qbman_fd *fd,
+                              const struct qbman_result *dq,
+                              struct dpaa2_queue *rxq,
+                              struct rte_event *ev)
 {
-       uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+       uint8_t dqrr_index;
 
-       ev->mbuf = eth_fd_to_mbuf(fd);
+       rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+               DPAA2_FD_PTA_SIZE + 16));
 
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
@@ -731,6 +712,9 @@ void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
+       ev->mbuf = eth_fd_to_mbuf(fd);
+
+       dqrr_index = qbman_get_dqrr_idx(dq);
        ev->mbuf->seqn = dqrr_index + 1;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
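
Taken together, the two event handlers now follow the classic prefetch
pipeline: start the load early, overlap it with independent work, and
consume the data last. A self-contained illustration of the ordering
(the function name is hypothetical; the +16 presumably lands on the
first annotation word the parser reads):

#include <rte_prefetch.h>	/* rte_prefetch0() */

/* Hypothetical condensation of the two handlers above; the dpaa2
 * macros and types come from the driver headers. */
static void
process_event_sketch(const struct qbman_fd *fd,
		     struct dpaa2_queue *rxq, struct rte_event *ev)
{
	/* 1. Kick off the load of the hot annotation line now. */
	rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	/* 2. Work that does not touch the annotation overlaps with
	 *    the memory access: copy event metadata from the queue. */
	ev->flow_id = rxq->ev.flow_id;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	/* ... remaining ev fields ... */

	/* 3. Dereference the annotation only at the end, when the
	 *    prefetch has (hopefully) completed. */
	ev->mbuf = eth_fd_to_mbuf(fd);
}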