gpudev: add alignment for memory allocation
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 495efd9..80eaea5 100644
@@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev;
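
The flow id capability is resolved once and handed to the worker as a const bool, so the per-event path only tests a local. A minimal sketch of such a capability probe, assuming a hypothetical has_flow_id_cap() helper in place of the evt_has_flow_id() used further down:

#include <stdbool.h>
#include <rte_eventdev.h>

/* Hypothetical stand-in for evt_has_flow_id(): true when the device
 * advertises RTE_EVENT_DEV_CAP_CARRY_FLOW_ID, i.e. ev.flow_id survives
 * the trip from enqueue to dequeue.
 */
static inline bool
has_flow_id_cap(uint8_t dev_id)
{
        struct rte_event_dev_info info;

        rte_event_dev_info_get(dev_id, &info);
        return (info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) != 0;
}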
@@ -28,12 +28,15 @@ order_queue_worker(void *arg)
                uint16_t event = rte_event_dequeue_burst(dev_id, port,
                                        &ev, 1, 0);
                if (!event) {
-                       if (rte_atomic64_read(outstand_pkts) <= 0)
+                       if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
                                break;
                        rte_pause();
                        continue;
                }
 
+               if (!flow_id_cap)
+                       order_flow_id_copy_from_mbuf(t, &ev);
+
                if (ev.queue_id == 0) { /* from ordered queue */
                        order_queue_process_stage_0(&ev);
                        while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
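
rte_atomic64_read() is swapped for the compiler's __atomic builtin; a relaxed load is enough because the worker only polls the counter to decide when to stop. A minimal sketch of both sides of that pattern, assuming outstand_pkts now points at a plain int64_t rather than an rte_atomic64_t:

#include <stdint.h>
#include <rte_pause.h>

/* Consumer side: poll the counter with a relaxed load until all
 * outstanding packets have been accounted for. */
static void
wait_for_outstanding(int64_t *outstand_pkts)
{
        while (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) > 0)
                rte_pause();
}

/* Producer/worker side: the matching relaxed read-modify-write decrement. */
static void
retire_pkts(int64_t *outstand_pkts, unsigned int nb_pkts)
{
        __atomic_fetch_sub(outstand_pkts, nb_pkts, __ATOMIC_RELAXED);
}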
@@ -50,7 +53,7 @@ order_queue_worker(void *arg)
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
 {
        ORDER_WORKER_INIT;
        struct rte_event ev[BURST_SIZE];
@@ -61,13 +64,17 @@ order_queue_worker_burst(void *arg)
                                BURST_SIZE, 0);
 
                if (nb_rx == 0) {
-                       if (rte_atomic64_read(outstand_pkts) <= 0)
+                       if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
                                break;
                        rte_pause();
                        continue;
                }
 
                for (i = 0; i < nb_rx; i++) {
+
+                       if (!flow_id_cap)
+                               order_flow_id_copy_from_mbuf(t, &ev[i]);
+
                        if (ev[i].queue_id == 0) { /* from ordered queue */
                                order_queue_process_stage_0(&ev[i]);
                        } else if (ev[i].queue_id == 1) {/* from atomic queue */
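
Without the carry-flow-id capability, the dequeued event cannot be trusted to still hold the flow id, so the test restores it from the mbuf via order_flow_id_copy_from_mbuf(). An illustrative sketch of that idea, assuming the flow id was stashed in an mbuf dynamic field at enqueue time (the names and offset handling here are assumptions, not the test's own code):

#include <stdint.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

typedef uint32_t flow_id_t;

/* Illustrative equivalent of order_flow_id_copy_from_mbuf(): read the
 * flow id back out of the mbuf dynamic field and put it into the event,
 * so later stages see the same flow id the producer assigned. */
static inline void
flow_id_restore(int flow_id_dynfield_offset, struct rte_event *ev)
{
        ev->flow_id = *RTE_MBUF_DYNFIELD(ev->mbuf,
                        flow_id_dynfield_offset, flow_id_t *);
}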
@@ -95,11 +102,19 @@ worker_wrapper(void *arg)
 {
        struct worker_data *w  = arg;
        const bool burst = evt_has_burst_mode(w->dev_id);
-
-       if (burst)
-               return order_queue_worker_burst(arg);
-       else
-               return order_queue_worker(arg);
+       const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+       if (burst) {
+               if (flow_id_cap)
+                       return order_queue_worker_burst(arg, true);
+               else
+                       return order_queue_worker_burst(arg, false);
+       } else {
+               if (flow_id_cap)
+                       return order_queue_worker(arg, true);
+               else
+                       return order_queue_worker(arg, false);
+       }
 }
 
 static int
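
worker_wrapper() deliberately expands flow_id_cap into literal true/false call sites rather than forwarding the variable: with a const bool parameter and constant arguments, the compiler can specialize each worker and drop the capability branch from the per-event loop. A stand-alone illustration of the pattern (not the DPDK code):

#include <stdbool.h>
#include <stddef.h>

/* Hot loop parameterized by a flag that is a compile-time constant at
 * every call site, mirroring order_queue_worker(arg, flow_id_cap). */
static int
worker_loop(void *arg, const bool flow_id_cap)
{
        int fixups = 0;
        int i;

        (void)arg;
        for (i = 0; i < 1024; i++) {
                if (!flow_id_cap)  /* dead code in the 'true' specialization */
                        fixups++;  /* stands in for the flow id restore */
        }
        return fixups;
}

static int
wrapper(void *arg, bool flow_id_cap)
{
        /* Literal arguments at each call site, as in worker_wrapper(). */
        if (flow_id_cap)
                return worker_loop(arg, true);
        else
                return worker_loop(arg, false);
}

int
main(void)
{
        /* 0 fix-ups on the capable path, 1024 on the fallback path. */
        return wrapper(NULL, true) + wrapper(NULL, false) == 1024 ? 0 : 1;
}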