X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=test%2Ftest%2Ftest_eventdev_sw.c;h=b86b137ed82216329476f703b3c78f5e76c71313;hb=4c00cfdc0ea225f2518a35db928ad1ab02b2a724;hp=c37e345b6353b45d27b3cdf35d40cb9492736eec;hpb=0e1eadd0d682725ba9cc1fb5c35ae3946eb104e5;p=dpdk.git

diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c
index c37e345b63..b86b137ed8 100644
--- a/test/test/test_eventdev_sw.c
+++ b/test/test/test_eventdev_sw.c
@@ -39,7 +39,6 @@
 #include
 
 #include
-#include
 #include
 #include
 #include
@@ -47,8 +46,11 @@
 #include
 #include
 #include
-
 #include
+#include
+#include
+#include
+
 #include "test.h"
 
 #define MAX_PORTS 16
@@ -62,6 +64,7 @@ struct test {
 	uint8_t port[MAX_PORTS];
 	uint8_t qid[MAX_QIDS];
 	int nb_qids;
+	uint32_t service_id;
 };
 
 static struct rte_event release_ev;
@@ -218,7 +221,7 @@ create_lb_qids(struct test *t, int num_qids, uint32_t flags)
 
 	/* Q creation */
 	const struct rte_event_queue_conf conf = {
-			.event_queue_cfg = flags,
+			.schedule_type = flags,
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.nb_atomic_flows = 1024,
 			.nb_atomic_order_sequences = 1024,
@@ -241,20 +244,20 @@ create_lb_qids(struct test *t, int num_qids, uint32_t flags)
 
 static inline int
 create_atomic_qids(struct test *t, int num_qids)
 {
-	return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
 }
 
 static inline int
 create_ordered_qids(struct test *t, int num_qids)
 {
-	return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
 }
 
 static inline int
 create_unordered_qids(struct test *t, int num_qids)
 {
-	return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
+	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
 }
 
 static inline int
@@ -266,8 +269,6 @@ create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
 	static const struct rte_event_queue_conf conf = {
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
-			.nb_atomic_flows = 1024,
-			.nb_atomic_order_sequences = 1024,
 	};
 
 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
@@ -416,7 +417,7 @@ run_prio_packet_test(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -508,7 +509,7 @@ test_single_directed_packet(struct test *t)
 	}
 
 	/* Run schedule() as dir packets may need to be re-ordered */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -548,6 +549,50 @@ test_single_directed_packet(struct test *t)
 	cleanup(t);
 	return 0;
 }
 
+static int
+test_directed_forward_credits(struct test *t)
+{
+	uint32_t i;
+	int32_t err;
+
+	if (init(t, 1, 1) < 0 ||
+			create_ports(t, 1) < 0 ||
+			create_directed_qids(t, 1, t->port) < 0)
+		return -1;
+
+	if (rte_event_dev_start(evdev) < 0) {
+		printf("%d: Error with start call\n", __LINE__);
+		return -1;
+	}
+
+	struct rte_event ev = {
+			.op = RTE_EVENT_OP_NEW,
+			.queue_id = 0,
+	};
+
+	for (i = 0; i < 1000; i++) {
+		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+		if (err < 0) {
+			printf("%d: error failed to enqueue\n", __LINE__);
+			return -1;
+		}
+		rte_service_run_iter_on_app_lcore(t->service_id);
+
+		uint32_t deq_pkts;
+		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+		if (deq_pkts != 1) {
+			printf("%d: error failed to deq\n", __LINE__);
+			return -1;
+		}
+
+		/* re-write event to be a forward, and continue looping it */
+		ev.op = RTE_EVENT_OP_FORWARD;
+	}
+
+	cleanup(t);
+	return 0;
+}
+
 static int
 test_priority_directed(struct test *t)
@@ -693,7 +738,7 @@ burst_packets(struct test *t)
 			return -1;
 		}
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Check stats for all NUM_PKTS arrived to sched core */
 	struct test_event_dev_stats stats;
@@ -782,7 +827,7 @@ abuse_inflights(struct test *t)
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 
@@ -920,7 +965,7 @@ xstats_tests(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Device names / values */
 	int num_stats = rte_event_dev_xstats_names_get(evdev,
@@ -1195,7 +1240,7 @@ port_reconfig_credits(struct test *t)
 	const uint32_t NUM_ITERS = 32;
 	for (i = 0; i < NUM_ITERS; i++) {
 		const struct rte_event_queue_conf conf = {
-			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.nb_atomic_flows = 1024,
 			.nb_atomic_order_sequences = 1024,
@@ -1247,7 +1292,7 @@
 			}
 		}
 
-		rte_event_schedule(evdev);
+		rte_service_run_iter_on_app_lcore(t->service_id);
 
 		struct rte_event ev[NPKTS];
 		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
@@ -1277,7 +1322,7 @@ port_single_lb_reconfig(struct test *t)
 
 	static const struct rte_event_queue_conf conf_lb_atomic = {
 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
-		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 		.nb_atomic_flows = 1024,
 		.nb_atomic_order_sequences = 1024,
 	};
@@ -1289,8 +1334,6 @@
 	static const struct rte_event_queue_conf conf_single_link = {
 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
-		.nb_atomic_flows = 1024,
-		.nb_atomic_order_sequences = 1024,
 	};
 	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
 		printf("%d: error creating qid\n", __LINE__);
@@ -1475,7 +1518,7 @@ xstats_id_reset_tests(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	static const char * const dev_names[] = {
 		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
@@ -1777,7 +1820,7 @@ ordered_reconfigure(struct test *t)
 	}
 
 	const struct rte_event_queue_conf conf = {
-			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+			.schedule_type = RTE_SCHED_TYPE_ORDERED,
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.nb_atomic_flows = 1024,
 			.nb_atomic_order_sequences = 1024,
@@ -1824,7 +1867,7 @@ qid_priorities(struct test *t)
 	for (i = 0; i < 3; i++) {
 		/* Create QID */
 		const struct rte_event_queue_conf conf = {
-			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
 			/* increase priority (0 == highest), as we go */
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
 			.nb_atomic_flows = 1024,
@@ -1866,7 +1909,7 @@
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* dequeue packets, verify priority was upheld */
 	struct rte_event ev[32];
@@ -1947,7 +1990,7 @@ load_balancing(struct test *t)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	struct test_event_dev_stats stats;
 	err = test_event_dev_stats_get(evdev, &stats);
@@ -2047,7 +2090,7 @@ load_balancing_history(struct test *t)
 	}
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
 	struct rte_event ev;
@@ -2064,7 +2107,7 @@
 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/*
 	 * Set up the next set of flows, first a new flow to fill up
@@ -2097,7 +2140,7 @@
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2141,7 +2184,7 @@
 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	cleanup(t);
 	return 0;
@@ -2207,7 +2250,7 @@ invalid_qid(struct test *t)
 	}
 
 	/* call the scheduler */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2292,7 +2335,7 @@ single_packet(struct test *t)
 		return -1;
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2335,7 +2378,7 @@
 		printf("%d: Failed to enqueue\n", __LINE__);
 		return -1;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[wrk_enq] != 0) {
@@ -2423,7 +2466,7 @@ inflight_counts(struct test *t)
 	}
 
 	/* schedule */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (err) {
@@ -2479,7 +2522,7 @@
 	 * As the scheduler core decrements inflights, it needs to run to
 	 * process packets to act on the drop messages
 	 */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[p1] != 0) {
@@ -2514,7 +2557,7 @@
 	 * As the scheduler core decrements inflights, it needs to run to
 	 * process packets to act on the drop messages
 	 */
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	err = test_event_dev_stats_get(evdev, &stats);
 	if (stats.port_inflight[p2] != 0) {
@@ -2608,7 +2651,7 @@ parallel_basic(struct test *t, int check_order)
 		}
 	}
 
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* use extra slot to make logic in loops easier */
 	struct rte_event deq_ev[w3_port + 1];
@@ -2635,7 +2678,7 @@
 			return -1;
 		}
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* dequeue from the tx ports, we should get 3 packets */
 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
@@ -2713,7 +2756,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error doing first enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
 			!= 1)
@@ -2738,7 +2781,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 			printf("%d: Error with enqueue\n", __LINE__);
 			goto err;
 		}
-		rte_event_schedule(evdev);
+		rte_service_run_iter_on_app_lcore(t->service_id);
 	} while (rte_event_dev_xstats_by_name_get(evdev,
 				rx_port_free_stat, NULL) != 0);
 
@@ -2748,7 +2791,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error with enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	/* check that the other port still has an empty CQ */
 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
@@ -2771,7 +2814,7 @@ holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
 		printf("%d: Error with enqueue\n", __LINE__);
 		goto err;
 	}
-	rte_event_schedule(evdev);
+	rte_service_run_iter_on_app_lcore(t->service_id);
 
 	if (rte_event_dev_xstats_by_name_get(evdev,
 			other_port_used_stat, NULL) != 1) {
@@ -2961,7 +3004,7 @@ worker_loopback(struct test *t)
 	while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
 			rte_eal_get_lcore_state(w_lcore) != FINISHED) {
 
-		rte_event_schedule(evdev);
+		rte_service_run_iter_on_app_lcore(t->service_id);
 
 		uint64_t new_cycles = rte_get_timer_cycles();
 
@@ -2988,7 +3031,8 @@
 			cycles = new_cycles;
 		}
 	}
-	rte_event_schedule(evdev); /* ensure all completions are flushed */
+	rte_service_run_iter_on_app_lcore(t->service_id);
+	/* ensure all completions are flushed */
 
 	rte_eal_mp_wait_lcore();
 
@@ -3025,6 +3069,14 @@ test_sw_eventdev(void)
 		}
 	}
 
+	if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
+		printf("Failed to get service ID for software event dev\n");
+		return -1;
+	}
+
+	rte_service_runstate_set(t->service_id, 1);
+	rte_service_set_runstate_mapped_check(t->service_id, 0);
+
 	/* Only create mbuf pool once, reuse for each test run */
 	if (!eventdev_func_mempool) {
 		eventdev_func_mempool = rte_pktmbuf_pool_create(
@@ -3040,13 +3092,18 @@
 		}
 	}
 	t->mbuf_pool = eventdev_func_mempool;
-
 	printf("*** Running Single Directed Packet test...\n");
 	ret = test_single_directed_packet(t);
 	if (ret != 0) {
 		printf("ERROR - Single Directed Packet test FAILED.\n");
 		return ret;
 	}
+	printf("*** Running Directed Forward Credit test...\n");
+	ret = test_directed_forward_credits(t);
+	if (ret != 0) {
+		printf("ERROR - Directed Forward Credit test FAILED.\n");
+		return ret;
+	}
 	printf("*** Running Single Load Balanced Packet test...\n");
 	ret = single_packet(t);
 	if (ret != 0) {