net/dpaa2: prefetch the annotation in event processing
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 3d45669..83e0e71 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -334,8 +334,8 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 
        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
+       DPAA2_SET_ONLY_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, temp->data_off);
-       DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        /*Set Scatter gather table and Scatter gather entries*/
@@ -676,7 +676,8 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
 {
-       ev->mbuf = eth_fd_to_mbuf(fd);
+       rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+               DPAA2_FD_PTA_SIZE + 16));
 
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
@@ -686,18 +687,22 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
+       ev->mbuf = eth_fd_to_mbuf(fd);
+
        qbman_swp_dqrr_consume(swp, dq);
 }
 
-void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
-                                   const struct qbman_fd *fd,
-                                   const struct qbman_result *dq,
-                                   struct dpaa2_queue *rxq,
-                                   struct rte_event *ev)
+void __attribute__((hot))
+dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+                              const struct qbman_fd *fd,
+                              const struct qbman_result *dq,
+                              struct dpaa2_queue *rxq,
+                              struct rte_event *ev)
 {
-       uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
+       uint8_t dqrr_index;
 
-       ev->mbuf = eth_fd_to_mbuf(fd);
+       rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+               DPAA2_FD_PTA_SIZE + 16));
 
        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
@@ -707,6 +712,9 @@ void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;
 
+       ev->mbuf = eth_fd_to_mbuf(fd);
+
+       dqrr_index = qbman_get_dqrr_idx(dq);
        ev->mbuf->seqn = dqrr_index + 1;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
@@ -776,7 +784,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
                        }
 
-                       fd_arr[loop].simple.bpid_offset = 0;
                        fd_arr[loop].simple.frc = 0;
                        DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
                        DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
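
For reference, a minimal sketch (not part of the patch) of the pattern the two event handlers now follow: issue the annotation prefetch first, fill the per-queue event fields from the queue template while the line is in flight, and only then convert the frame descriptor to an mbuf. Everything except rte_prefetch0() and struct rte_event below is a hypothetical stand-in: anno_offset plays the role of DPAA2_FD_PTA_SIZE + 16, and parse_annotation() the role of eth_fd_to_mbuf().

/*
 * Editorial sketch, assuming a DPDK build environment; names other than
 * rte_prefetch0() and struct rte_event are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <rte_prefetch.h>
#include <rte_eventdev.h>

typedef struct rte_mbuf *(*parse_annotation_t)(uintptr_t frame_va);

static inline void
fill_event_then_parse(uintptr_t frame_va, size_t anno_offset,
		      struct rte_event *ev, const struct rte_event *tmpl,
		      parse_annotation_t parse_annotation)
{
	/* 1. Start pulling the hardware annotation line into cache. */
	rte_prefetch0((void *)(frame_va + anno_offset));

	/* 2. Copy the static per-queue event fields while the load is in
	 *    flight; this work does not depend on the prefetched data. */
	ev->flow_id = tmpl->flow_id;
	ev->sched_type = tmpl->sched_type;
	ev->queue_id = tmpl->queue_id;
	ev->priority = tmpl->priority;

	/* 3. Only now read the annotation to build the mbuf. */
	ev->mbuf = parse_annotation(frame_va);
}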