eventdev: switch sequence number to dynamic mbuf field
Author: David Marchand <david.marchand@redhat.com>
Wed, 28 Oct 2020 12:20:11 +0000 (13:20 +0100)
Committer: Thomas Monjalon <thomas@monjalon.net>
Sat, 31 Oct 2020 21:14:42 +0000 (22:14 +0100)
The eventdev drivers have been hacking the deprecated field seqn for
internal test usage.
It is moved to a dynamic mbuf field in order to allow removal of seqn.

Signed-off-by: David Marchand <david.marchand@redhat.com>
drivers/event/octeontx/ssovf_evdev_selftest.c
drivers/event/octeontx2/otx2_evdev_selftest.c
drivers/event/opdl/opdl_test.c
drivers/event/sw/sw_evdev_selftest.c
lib/librte_eventdev/rte_eventdev.c
lib/librte_eventdev/rte_eventdev_pmd.h
lib/librte_eventdev/version.map

index 7a2b7de..528f99d 100644 (file)
@@ -300,7 +300,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                update_event_and_validation_attr(m, &ev, flow_id, event_type,
                        sub_event_type, sched_type, queue, port);
                rte_event_enqueue_burst(evdev, port, &ev, 1);
@@ -320,7 +320,8 @@ check_excess_events(uint8_t port)
                valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
 
                RTE_TEST_ASSERT_SUCCESS(valid_event,
-                               "Unexpected valid event=%d", ev.mbuf->seqn);
+                       "Unexpected valid event=%d",
+                       *rte_event_pmd_selftest_seqn(ev.mbuf));
        }
        return 0;
 }
@@ -425,8 +426,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-                       index, ev->mbuf->seqn);
+       RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+               "index=%d != seqn=%d", index,
+               *rte_event_pmd_selftest_seqn(ev->mbuf));
        return 0;
 }
 
@@ -509,10 +511,10 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 
        expected_val += ev->queue_id;
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-       "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-                       ev->mbuf->seqn, index, expected_val, range,
-                       queue_count, MAX_EVENTS);
+       RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+               "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+               *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
+               queue_count, MAX_EVENTS);
        return 0;
 }
 
@@ -537,7 +539,7 @@ test_multi_queue_priority(void)
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                queue = i % queue_count;
                update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
                        0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
@@ -904,7 +906,7 @@ worker_flow_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
@@ -939,7 +941,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1059,7 +1061,7 @@ worker_group_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
@@ -1101,7 +1103,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1238,7 +1240,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(
                0x1 /*flow_id */,
                RTE_EVENT_TYPE_CPU /* event_type */,
@@ -1360,7 +1362,7 @@ worker_ordered_flow_producer(void *arg)
                if (m == NULL)
                        continue;
 
-               m->seqn = counter++;
+               *rte_event_pmd_selftest_seqn(m) = counter++;
 
                struct rte_event ev = {.event = 0, .u64 = 0};
 
index 334a9cc..48bfaf8 100644 (file)
@@ -279,7 +279,7 @@ inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                update_event_and_validation_attr(m, &ev, flow_id, event_type,
                                                 sub_event_type, sched_type,
                                                 queue, port);
@@ -301,7 +301,7 @@ check_excess_events(uint8_t port)
 
                RTE_TEST_ASSERT_SUCCESS(valid_event,
                                        "Unexpected valid event=%d",
-                                       ev.mbuf->seqn);
+                                       *rte_event_pmd_selftest_seqn(ev.mbuf));
        }
        return 0;
 }
@@ -406,8 +406,9 @@ static int
 validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
 {
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
-                             index, ev->mbuf->seqn);
+       RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+               "index=%d != seqn=%d",
+               index, *rte_event_pmd_selftest_seqn(ev->mbuf));
        return 0;
 }
 
@@ -493,10 +494,11 @@ validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
 
        expected_val += ev->queue_id;
        RTE_SET_USED(port);
-       RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
-       "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
-                             ev->mbuf->seqn, index, expected_val, range,
-                             queue_count, MAX_EVENTS);
+       RTE_TEST_ASSERT_EQUAL(
+               *rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+               "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+               *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
+               range, queue_count, MAX_EVENTS);
        return 0;
 }
 
@@ -523,7 +525,7 @@ test_multi_queue_priority(void)
                m = rte_pktmbuf_alloc(eventdev_test_mempool);
                RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
 
-               m->seqn = i;
+               *rte_event_pmd_selftest_seqn(m) = i;
                queue = i % queue_count;
                update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
                                                 0, RTE_SCHED_TYPE_PARALLEL,
@@ -888,7 +890,9 @@ worker_flow_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+                       if (seqn_list_update(seqn) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
@@ -923,7 +927,7 @@ test_multiport_flow_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(0x1 /*flow_id */,
                            RTE_EVENT_TYPE_CPU /* event_type */,
                            0 /* sub_event_type (stage 0) */,
@@ -1043,7 +1047,9 @@ worker_group_based_pipeline(void *arg)
                        ev.op = RTE_EVENT_OP_FORWARD;
                        rte_event_enqueue_burst(evdev, port, &ev, 1);
                } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
-                       if (seqn_list_update(ev.mbuf->seqn) == 0) {
+                       uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+                       if (seqn_list_update(seqn) == 0) {
                                rte_pktmbuf_free(ev.mbuf);
                                rte_atomic32_sub(total_events, 1);
                        } else {
@@ -1084,7 +1090,7 @@ test_multiport_queue_sched_type_test(uint8_t in_sched_type,
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(0x1 /*flow_id */,
                            RTE_EVENT_TYPE_CPU /* event_type */,
                            0 /* sub_event_type (stage 0) */,
@@ -1222,7 +1228,7 @@ launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
                return 0;
        }
 
-       /* Injects events with m->seqn=0 to total_events */
+       /* Injects events with a 0 sequence number to total_events */
        ret = inject_events(0x1 /*flow_id */,
                            RTE_EVENT_TYPE_CPU /* event_type */,
                            0 /* sub_event_type (stage 0) */,
@@ -1348,7 +1354,7 @@ worker_ordered_flow_producer(void *arg)
                if (m == NULL)
                        continue;
 
-               m->seqn = counter++;
+               *rte_event_pmd_selftest_seqn(m) = counter++;
 
                struct rte_event ev = {.event = 0, .u64 = 0};
 
index e7a32fb..e4fc70a 100644 (file)
@@ -256,7 +256,7 @@ ordered_basic(struct test *t)
                ev.queue_id = t->qid[0];
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = mbufs[i];
-               mbufs[i]->seqn = MAGIC_SEQN + i;
+               *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
                /* generate pkt and enqueue */
                err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -281,7 +281,7 @@ ordered_basic(struct test *t)
                        rte_event_dev_dump(evdev, stdout);
                        return -1;
                }
-               seq = deq_ev[i].mbuf->seqn  - MAGIC_SEQN;
+               seq = *rte_event_pmd_selftest_seqn(deq_ev[i].mbuf)  - MAGIC_SEQN;
 
                if (seq != (i-1)) {
                        PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
@@ -396,7 +396,7 @@ atomic_basic(struct test *t)
                ev.op = RTE_EVENT_OP_NEW;
                ev.flow_id = 1;
                ev.mbuf = mbufs[i];
-               mbufs[i]->seqn = MAGIC_SEQN + i;
+               *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
                /* generate pkt and enqueue */
                err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -625,7 +625,7 @@ single_link_w_stats(struct test *t)
                ev.queue_id = t->qid[0];
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = mbufs[i];
-               mbufs[i]->seqn = 1234 + i;
+               *rte_event_pmd_selftest_seqn(mbufs[i]) = 1234 + i;
 
                /* generate pkt and enqueue */
                err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
index 0929fa6..e4bfb3a 100644 (file)
@@ -385,7 +385,7 @@ run_prio_packet_test(struct test *t)
                        printf("%d: gen of pkt failed\n", __LINE__);
                        return -1;
                }
-               arp->seqn = MAGIC_SEQN[i];
+               *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN[i];
 
                ev = (struct rte_event){
                        .priority = PRIORITY[i],
@@ -424,7 +424,7 @@ run_prio_packet_test(struct test *t)
                rte_event_dev_dump(evdev, stdout);
                return -1;
        }
-       if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+       if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
                printf("%d: first packet out not highest priority\n",
                                __LINE__);
                rte_event_dev_dump(evdev, stdout);
@@ -438,7 +438,7 @@ run_prio_packet_test(struct test *t)
                rte_event_dev_dump(evdev, stdout);
                return -1;
        }
-       if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+       if (*rte_event_pmd_selftest_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
                printf("%d: second packet out not lower priority\n",
                                __LINE__);
                rte_event_dev_dump(evdev, stdout);
@@ -482,7 +482,7 @@ test_single_directed_packet(struct test *t)
        }
 
        const uint32_t MAGIC_SEQN = 4711;
-       arp->seqn = MAGIC_SEQN;
+       *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
 
        /* generate pkt and enqueue */
        err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
@@ -521,7 +521,7 @@ test_single_directed_packet(struct test *t)
                return -1;
        }
 
-       if (ev.mbuf->seqn != MAGIC_SEQN) {
+       if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
                printf("%d: error magic sequence number not dequeued\n",
                                __LINE__);
                return -1;
@@ -939,7 +939,7 @@ xstats_tests(struct test *t)
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = arp;
                ev.flow_id = 7;
-               arp->seqn = i;
+               *rte_event_pmd_selftest_seqn(arp) = i;
 
                int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
                if (err != 1) {
@@ -1490,7 +1490,7 @@ xstats_id_reset_tests(struct test *t)
                ev.queue_id = t->qid[i];
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = arp;
-               arp->seqn = i;
+               *rte_event_pmd_selftest_seqn(arp) = i;
 
                int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
                if (err != 1) {
@@ -1878,7 +1878,7 @@ qid_priorities(struct test *t)
                ev.queue_id = t->qid[i];
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = arp;
-               arp->seqn = i;
+               *rte_event_pmd_selftest_seqn(arp) = i;
 
                int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
                if (err != 1) {
@@ -1899,7 +1899,7 @@ qid_priorities(struct test *t)
                return -1;
        }
        for (i = 0; i < 3; i++) {
-               if (ev[i].mbuf->seqn != 2-i) {
+               if (*rte_event_pmd_selftest_seqn(ev[i].mbuf) != 2-i) {
                        printf(
                                "%d: qid priority test: seqn %d incorrectly prioritized\n",
                                        __LINE__, i);
@@ -2376,7 +2376,7 @@ single_packet(struct test *t)
        ev.mbuf = arp;
        ev.queue_id = 0;
        ev.flow_id = 3;
-       arp->seqn = MAGIC_SEQN;
+       *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
 
        err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
        if (err != 1) {
@@ -2416,7 +2416,7 @@ single_packet(struct test *t)
        }
 
        err = test_event_dev_stats_get(evdev, &stats);
-       if (ev.mbuf->seqn != MAGIC_SEQN) {
+       if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
                printf("%d: magic sequence number not dequeued\n", __LINE__);
                return -1;
        }
@@ -2689,7 +2689,7 @@ parallel_basic(struct test *t, int check_order)
                ev.queue_id = t->qid[0];
                ev.op = RTE_EVENT_OP_NEW;
                ev.mbuf = mbufs[i];
-               mbufs[i]->seqn = MAGIC_SEQN + i;
+               *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
 
                /* generate pkt and enqueue */
                err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
@@ -2744,10 +2744,12 @@ parallel_basic(struct test *t, int check_order)
        /* Check to see if the sequence numbers are in expected order */
        if (check_order) {
                for (j = 0 ; j < deq_pkts ; j++) {
-                       if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
-                               printf(
-                                       "%d: Incorrect sequence number(%d) from port %d\n",
-                                       __LINE__, mbufs_out[j]->seqn, tx_port);
+                       if (*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf) !=
+                                       MAGIC_SEQN + j) {
+                               printf("%d: Incorrect sequence number(%d) from port %d\n",
+                                       __LINE__,
+                                       *rte_event_pmd_selftest_seqn(mbufs_out[j]),
+                                       tx_port);
                                return -1;
                        }
                }
index 322453c..994bd1e 100644 (file)
@@ -1242,13 +1242,25 @@ int rte_event_dev_xstats_reset(uint8_t dev_id,
        return -ENOTSUP;
 }
 
+int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
+
 int rte_event_dev_selftest(uint8_t dev_id)
 {
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+       static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
+               .name = "rte_event_pmd_selftest_seqn_dynfield",
+               .size = sizeof(rte_event_pmd_selftest_seqn_t),
+               .align = __alignof__(rte_event_pmd_selftest_seqn_t),
+       };
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];
 
-       if (dev->dev_ops->dev_selftest != NULL)
+       if (dev->dev_ops->dev_selftest != NULL) {
+               rte_event_pmd_selftest_seqn_dynfield_offset =
+                       rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
+               if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
+                       return -ENOMEM;
                return (*dev->dev_ops->dev_selftest)();
+       }
        return -ENOTSUP;
 }
 
index d118b9e..27be376 100644 (file)
@@ -20,10 +20,13 @@ extern "C" {
 #include <string.h>
 
 #include <rte_common.h>
+#include <rte_compat.h>
 #include <rte_config.h>
 #include <rte_dev.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
 
 #include "rte_eventdev.h"
 #include "rte_event_timer_adapter_pmd.h"
@@ -635,6 +638,23 @@ typedef int (*eventdev_eth_rx_adapter_stats_reset)
  */
 typedef int (*eventdev_selftest)(void);
 
+typedef uint32_t rte_event_pmd_selftest_seqn_t;
+extern int rte_event_pmd_selftest_seqn_dynfield_offset;
+
+/**
+ * Read test sequence number from mbuf.
+ *
+ * @param mbuf Structure to read from.
+ * @return pointer to test sequence number.
+ */
+__rte_internal
+static inline rte_event_pmd_selftest_seqn_t *
+rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
+{
+       return RTE_MBUF_DYNFIELD(mbuf,
+               rte_event_pmd_selftest_seqn_dynfield_offset,
+               rte_event_pmd_selftest_seqn_t *);
+}
 
 struct rte_cryptodev;
 
index 8ae8420..3e5c09c 100644 (file)
@@ -139,3 +139,9 @@ EXPERIMENTAL {
        # added in 20.11
        rte_event_pmd_pci_probe_named;
 };
+
+INTERNAL {
+       global:
+
+       rte_event_pmd_selftest_seqn_dynfield_offset;
+};