X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx%2Fssovf_evdev_selftest.c;h=528f99dd84db3488863c9566e6945467cfd5d352;hb=3ca55dac6d2c631dc98d4e0d1de99e8117d5c1ab;hp=239362fcf549281789c00b001766a37a196791a4;hpb=8384f0e039eae47cb1b951b2db41400a80fbaeb7;p=dpdk.git
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 239362fcf5..528f99dd84 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -300,7 +300,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_pmd_selftest_seqn(m) = i;
 		update_event_and_validation_attr(m, &ev, flow_id, event_type,
 				sub_event_type, sched_type, queue, port);
 		rte_event_enqueue_burst(evdev, port, &ev, 1);
@@ -320,7 +320,8 @@ check_excess_events(uint8_t port)
 		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
 		RTE_TEST_ASSERT_SUCCESS(valid_event,
-				"Unexpected valid event=%d", ev.mbuf->seqn);
+				"Unexpected valid event=%d",
+				*rte_event_pmd_selftest_seqn(ev.mbuf));
 	}
 	return 0;
 }
 
@@ -425,8 +426,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-			index, ev->mbuf->seqn);
+	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+			"index=%d != seqn=%d", index,
+			*rte_event_pmd_selftest_seqn(ev->mbuf));
 
 	return 0;
 }
@@ -509,10 +511,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 		expected_val += ev->queue_id;
 
 	RTE_SET_USED(port);
-	RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-	"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-	ev->mbuf->seqn, index, expected_val, range,
-	queue_count, MAX_EVENTS);
+	RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+		*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
+		queue_count, MAX_EVENTS);
 
 	return 0;
 }
@@ -537,7 +539,7 @@ test_multi_queue_priority(void)
 		m = rte_pktmbuf_alloc(eventdev_test_mempool);
 		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-		m->seqn = i;
+		*rte_event_pmd_selftest_seqn(m) = i;
 		queue = i % queue_count;
 		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
 				0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
@@ -601,8 +603,8 @@ wait_workers_to_join(int lcore, const rte_atomic32_t *count)
 
 
 static inline int
-launch_workers_and_wait(int (*master_worker)(void *),
-		int (*slave_workers)(void *), uint32_t total_events,
+launch_workers_and_wait(int (*main_worker)(void *),
+		int (*worker)(void *), uint32_t total_events,
 		uint8_t nb_workers, uint8_t sched_type)
 {
 	uint8_t port = 0;
@@ -637,9 +639,9 @@ launch_workers_and_wait(int (*master_worker)(void *),
 
 	w_lcore = rte_get_next_lcore(
 			/* start core */ -1,
-			/* skip master */ 1,
+			/* skip main */ 1,
 			/* wrap */ 0);
-	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+	rte_eal_remote_launch(main_worker, &param[0], w_lcore);
 
 	for (port = 1; port < nb_workers; port++) {
 		param[port].total_events = &atomic_total_events;
@@ -648,7 +650,7 @@ launch_workers_and_wait(int (*master_worker)(void *),
 		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
 		rte_smp_wmb();
 		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
-		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+		rte_eal_remote_launch(worker, &param[port], w_lcore);
 	}
 
 	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
@@ -904,7 +906,7 @@ worker_flow_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -939,7 +941,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with a 0 sequence number to total_events */
 	ret = inject_events(
 			0x1 /*flow_id */,
 			RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1059,7 +1061,7 @@ worker_group_based_pipeline(void *arg)
 			ev.op = RTE_EVENT_OP_FORWARD;
 			rte_event_enqueue_burst(evdev, port, &ev, 1);
 		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-			if (seqn_list_update(ev.mbuf->seqn) == 0) {
+			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
 				rte_pktmbuf_free(ev.mbuf);
 				rte_atomic32_sub(total_events, 1);
 			} else {
@@ -1101,7 +1103,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with a 0 sequence number to total_events */
 	ret = inject_events(
 			0x1 /*flow_id */,
 			RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1238,7 +1240,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
 		return 0;
 	}
 
-	/* Injects events with m->seqn=0 to total_events */
+	/* Injects events with a 0 sequence number to total_events */
 	ret = inject_events(
 			0x1 /*flow_id */,
 			RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1360,7 +1362,7 @@ worker_ordered_flow_producer(void *arg)
 		if (m == NULL)
 			continue;
 
-		m->seqn = counter++;
+		*rte_event_pmd_selftest_seqn(m) = counter++;
 
 		struct rte_event ev = {.event = 0, .u64 = 0};
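
Note: the m->seqn replacements above follow the removal of the static seqn field from struct rte_mbuf; the selftest sequence number now lives in a dynamic mbuf field. Below is a minimal sketch of how such an accessor is typically built on DPDK's rte_mbuf_dyn.h API. The selftest_seqn* names are illustrative stand-ins for the upstream helpers declared in the eventdev PMD headers, whose exact names and location may differ between releases.

/*
 * Sketch of the dynamic-field pattern behind rte_event_pmd_selftest_seqn().
 * The selftest_seqn* identifiers are hypothetical, for illustration only.
 */
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

typedef uint32_t selftest_seqn_t;

/* Offset of the field inside struct rte_mbuf; -1 until registered. */
static int selftest_seqn_offset = -1;

/* Register the sequence-number field once, before injecting events. */
static int
selftest_seqn_register(void)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "selftest_seqn_dynfield",
		.size = sizeof(selftest_seqn_t),
		.align = __alignof__(selftest_seqn_t),
	};

	selftest_seqn_offset = rte_mbuf_dynfield_register(&desc);
	return selftest_seqn_offset; /* negative if no mbuf space was left */
}

/* Accessor used as in the hunks above: *selftest_seqn(m) = i; */
static inline selftest_seqn_t *
selftest_seqn(struct rte_mbuf *m)
{
	return RTE_MBUF_DYNFIELD(m, selftest_seqn_offset, selftest_seqn_t *);
}

Registration must happen before the accessor is first used, which is why the eventdev selftests perform it during test setup rather than per event.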