diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index a272c7a57d..80eaea5cf5 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -9,7 +9,7 @@
 
 /* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
 
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
 order_queue_process_stage_0(struct rte_event *const ev)
 {
 	ev->queue_id = 1; /* q1 atomic queue */
@@ -19,7 +19,7 @@ order_queue_process_stage_0(struct rte_event *const ev)
 }
 
 static int
-order_queue_worker(void *arg)
+order_queue_worker(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev;
@@ -28,12 +28,15 @@ order_queue_worker(void *arg)
 		uint16_t event = rte_event_dequeue_burst(dev_id, port,
 					&ev, 1, 0);
 		if (!event) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
 		}
 
+		if (!flow_id_cap)
+			order_flow_id_copy_from_mbuf(t, &ev);
+
 		if (ev.queue_id == 0) { /* from ordered queue */
 			order_queue_process_stage_0(&ev);
 			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
@@ -50,7 +53,7 @@
 }
 
 static int
-order_queue_worker_burst(void *arg)
+order_queue_worker_burst(void *arg, const bool flow_id_cap)
 {
 	ORDER_WORKER_INIT;
 	struct rte_event ev[BURST_SIZE];
@@ -61,13 +64,17 @@
 				BURST_SIZE, 0);
 
 		if (nb_rx == 0) {
-			if (rte_atomic64_read(outstand_pkts) <= 0)
+			if (__atomic_load_n(outstand_pkts, __ATOMIC_RELAXED) <= 0)
 				break;
 			rte_pause();
 			continue;
 		}
 
 		for (i = 0; i < nb_rx; i++) {
+
+			if (!flow_id_cap)
+				order_flow_id_copy_from_mbuf(t, &ev[i]);
+
 			if (ev[i].queue_id == 0) { /* from ordered queue */
 				order_queue_process_stage_0(&ev[i]);
 			} else if (ev[i].queue_id == 1) {/* from atomic queue */
@@ -95,11 +102,19 @@ worker_wrapper(void *arg)
 {
 	struct worker_data *w = arg;
 	const bool burst = evt_has_burst_mode(w->dev_id);
-
-	if (burst)
-		return order_queue_worker_burst(arg);
-	else
-		return order_queue_worker(arg);
+	const bool flow_id_cap = evt_has_flow_id(w->dev_id);
+
+	if (burst) {
+		if (flow_id_cap)
+			return order_queue_worker_burst(arg, true);
+		else
+			return order_queue_worker_burst(arg, false);
+	} else {
+		if (flow_id_cap)
+			return order_queue_worker(arg, true);
+		else
+			return order_queue_worker(arg, false);
+	}
 }
 
 static int
@@ -118,16 +133,7 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	/* number of active worker cores + 1 producer */
 	const uint8_t nb_ports = nb_workers + 1;
 
-	const struct rte_event_dev_config config = {
-			.nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
-			.nb_event_ports = nb_ports,
-			.nb_events_limit = 4096,
-			.nb_event_queue_flows = opt->nb_flows,
-			.nb_event_port_dequeue_depth = 128,
-			.nb_event_port_enqueue_depth = 128,
-	};
-
-	ret = rte_event_dev_configure(opt->dev_id, &config);
+	ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
 	if (ret) {
 		evt_err("failed to configure eventdev %d", opt->dev_id);
 		return ret;
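
Note on the inlining and dispatch hunks: __rte_always_inline (from
rte_common.h) expands to inline __attribute__((always_inline)) on
GCC/Clang builds, so the first hunk switches to the project-standard
macro rather than changing behavior. In worker_wrapper(), each worker
is now called with a literal true/false for flow_id_cap;
evt_has_flow_id() presumably reports whether the event device
preserves ev.flow_id across enqueue/dequeue, and when it does not,
the worker restores the ID from the mbuf via
order_flow_id_copy_from_mbuf(). Because the bool is a compile-time
constant at every call site, the compiler can specialize each path
and drop the untaken branch from the hot loop. A minimal,
self-contained sketch of that idiom; do_work() and dispatch() below
are illustrative stand-ins, not the patch's functions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Force inlining so every call site that passes a literal bool
     * gets its own specialized copy with the dead branch removed. */
    static inline __attribute__((always_inline)) int
    do_work(int pkt, const bool flow_id_cap)
    {
            if (!flow_id_cap)
                    pkt |= 0x100; /* stand-in for the flow ID restore */
            return pkt + 1;
    }

    static int
    dispatch(int pkt, bool flow_id_cap)
    {
            /* Literal arguments, as in worker_wrapper(): each branch
             * compiles to a worker without the per-event check. */
            if (flow_id_cap)
                    return do_work(pkt, true);
            return do_work(pkt, false);
    }

    int
    main(void)
    {
            printf("%d %d\n", dispatch(1, true), dispatch(1, false));
            return 0;
    }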
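
Note on the atomics hunks: the legacy rte_atomic64_read() is replaced
with the compiler builtin __atomic_load_n() at __ATOMIC_RELAXED.
Relaxed ordering looks sufficient here because outstand_pkts only
gates loop termination; the worker does not rely on this load to
order any other memory access. A self-contained sketch of that
counter handshake, with illustrative names (in the real test the
pointer comes from ORDER_WORKER_INIT and the counter is seeded by
the test setup):

    #include <stdbool.h>
    #include <stdint.h>

    static int64_t outstand_pkts; /* seeded with the total packet count */

    /* Worker side: retire one packet after processing it. */
    static void
    retire_one(void)
    {
            __atomic_fetch_sub(&outstand_pkts, 1, __ATOMIC_RELAXED);
    }

    /* Same exit test the patch uses: a relaxed load is fine for a
     * termination hint, since no other data is published through
     * this variable. */
    static bool
    should_stop(void)
    {
            return __atomic_load_n(&outstand_pkts, __ATOMIC_RELAXED) <= 0;
    }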
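
Note on the setup hunk: the per-test rte_event_dev_config, with its
hard-coded nb_events_limit and dequeue/enqueue depths (4096/128/128),
is dropped in favor of the shared evt_configure_eventdev() helper.
The helper's body is not part of this diff; presumably it sizes those
fields from rte_event_dev_info_get() instead of hard-coding them, so
the test also runs on devices with smaller limits. A sketch of what
such a wrapper could look like; this is an assumption about
evt_configure_eventdev(), not its actual code:

    #include <rte_eventdev.h>

    /* Assumed shape: queue/port counts from the caller, limits taken
     * from the device's advertised capabilities. */
    static int
    configure_eventdev_sketch(uint8_t dev_id, uint8_t nb_queues,
                              uint8_t nb_ports, uint32_t nb_flows)
    {
            struct rte_event_dev_info info;
            int ret;

            ret = rte_event_dev_info_get(dev_id, &info);
            if (ret)
                    return ret;

            struct rte_event_dev_config config = {
                    .nb_event_queues = nb_queues,
                    .nb_event_ports = nb_ports,
                    .nb_events_limit = info.max_num_events,
                    .nb_event_queue_flows = nb_flows,
                    .nb_event_port_dequeue_depth =
                            info.max_event_port_dequeue_depth,
                    .nb_event_port_enqueue_depth =
                            info.max_event_port_enqueue_depth,
            };

            return rte_event_dev_configure(dev_id, &config);
    }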