m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type, queue, port);
rte_event_enqueue_burst(evdev, port, &ev, 1);
valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
RTE_TEST_ASSERT_SUCCESS(valid_event,
- "Unexpected valid event=%d", ev.mbuf->seqn);
+ "Unexpected valid event=%d",
+ *rte_event_pmd_selftest_seqn(ev.mbuf));
}
return 0;
}
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+ "index=%d != seqn=%d", index,
+ *rte_event_pmd_selftest_seqn(ev->mbuf));
return 0;
}
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
+ queue_count, MAX_EVENTS);
return 0;
}
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(
0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_pmd_selftest_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
update_event_and_validation_attr(m, &ev, flow_id, event_type,
sub_event_type, sched_type,
queue, port);
RTE_TEST_ASSERT_SUCCESS(valid_event,
"Unexpected valid event=%d",
- ev.mbuf->seqn);
+ *rte_event_pmd_selftest_seqn(ev.mbuf));
}
return 0;
}
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d",
- index, ev->mbuf->seqn);
+ RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+ "index=%d != seqn=%d",
+ index, *rte_event_pmd_selftest_seqn(ev->mbuf));
return 0;
}
expected_val += ev->queue_id;
RTE_SET_USED(port);
- RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
- "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
- ev->mbuf->seqn, index, expected_val, range,
- queue_count, MAX_EVENTS);
+ RTE_TEST_ASSERT_EQUAL(
+ *rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ *rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
+ range, queue_count, MAX_EVENTS);
return 0;
}
m = rte_pktmbuf_alloc(eventdev_test_mempool);
RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
- m->seqn = i;
+ *rte_event_pmd_selftest_seqn(m) = i;
queue = i % queue_count;
update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
0, RTE_SCHED_TYPE_PARALLEL,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+ if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
ev.op = RTE_EVENT_OP_FORWARD;
rte_event_enqueue_burst(evdev, port, &ev, 1);
} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
- if (seqn_list_update(ev.mbuf->seqn) == 0) {
+ uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+ if (seqn_list_update(seqn) == 0) {
rte_pktmbuf_free(ev.mbuf);
rte_atomic32_sub(total_events, 1);
} else {
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
return 0;
}
- /* Injects events with m->seqn=0 to total_events */
+ /* Injects events with sequence numbers from 0 to total_events */
ret = inject_events(0x1 /*flow_id */,
RTE_EVENT_TYPE_CPU /* event_type */,
0 /* sub_event_type (stage 0) */,
if (m == NULL)
continue;
- m->seqn = counter++;
+ *rte_event_pmd_selftest_seqn(m) = counter++;
struct rte_event ev = {.event = 0, .u64 = 0};
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
rte_event_dev_dump(evdev, stdout);
return -1;
}
- seq = deq_ev[i].mbuf->seqn - MAGIC_SEQN;
+ seq = *rte_event_pmd_selftest_seqn(deq_ev[i].mbuf) - MAGIC_SEQN;
if (seq != (i-1)) {
PMD_DRV_LOG(ERR, " seq test failed ! eq is %d , "
ev.op = RTE_EVENT_OP_NEW;
ev.flow_id = 1;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = 1234 + i;
+ *rte_event_pmd_selftest_seqn(mbufs[i]) = 1234 + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
printf("%d: gen of pkt failed\n", __LINE__);
return -1;
}
- arp->seqn = MAGIC_SEQN[i];
+ *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN[i];
ev = (struct rte_event){
.priority = PRIORITY[i],
rte_event_dev_dump(evdev, stdout);
return -1;
}
- if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+ if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN[1]) {
printf("%d: first packet out not highest priority\n",
__LINE__);
rte_event_dev_dump(evdev, stdout);
rte_event_dev_dump(evdev, stdout);
return -1;
}
- if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+ if (*rte_event_pmd_selftest_seqn(ev2.mbuf) != MAGIC_SEQN[0]) {
printf("%d: second packet out not lower priority\n",
__LINE__);
rte_event_dev_dump(evdev, stdout);
}
const uint32_t MAGIC_SEQN = 4711;
- arp->seqn = MAGIC_SEQN;
+ *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
return -1;
}
- if (ev.mbuf->seqn != MAGIC_SEQN) {
+ if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
printf("%d: error magic sequence number not dequeued\n",
__LINE__);
return -1;
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
ev.flow_id = 7;
- arp->seqn = i;
+ *rte_event_pmd_selftest_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
ev.queue_id = t->qid[i];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
- arp->seqn = i;
+ *rte_event_pmd_selftest_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
ev.queue_id = t->qid[i];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = arp;
- arp->seqn = i;
+ *rte_event_pmd_selftest_seqn(arp) = i;
int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
if (err != 1) {
return -1;
}
for (i = 0; i < 3; i++) {
- if (ev[i].mbuf->seqn != 2-i) {
+ if (*rte_event_pmd_selftest_seqn(ev[i].mbuf) != 2-i) {
printf(
"%d: qid priority test: seqn %d incorrectly prioritized\n",
__LINE__, i);
ev.mbuf = arp;
ev.queue_id = 0;
ev.flow_id = 3;
- arp->seqn = MAGIC_SEQN;
+ *rte_event_pmd_selftest_seqn(arp) = MAGIC_SEQN;
err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
if (err != 1) {
}
err = test_event_dev_stats_get(evdev, &stats);
- if (ev.mbuf->seqn != MAGIC_SEQN) {
+ if (*rte_event_pmd_selftest_seqn(ev.mbuf) != MAGIC_SEQN) {
printf("%d: magic sequence number not dequeued\n", __LINE__);
return -1;
}
ev.queue_id = t->qid[0];
ev.op = RTE_EVENT_OP_NEW;
ev.mbuf = mbufs[i];
- mbufs[i]->seqn = MAGIC_SEQN + i;
+ *rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;
/* generate pkt and enqueue */
err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
/* Check to see if the sequence numbers are in expected order */
if (check_order) {
for (j = 0 ; j < deq_pkts ; j++) {
- if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
- printf(
- "%d: Incorrect sequence number(%d) from port %d\n",
- __LINE__, mbufs_out[j]->seqn, tx_port);
+ if (*rte_event_pmd_selftest_seqn(deq_ev[j].mbuf) !=
+ MAGIC_SEQN + j) {
+ printf("%d: Incorrect sequence number(%d) from port %d\n",
+ __LINE__,
+ *rte_event_pmd_selftest_seqn(mbufs_out[j]),
+ tx_port);
return -1;
}
}
return -ENOTSUP;
}
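+/* Offset of the selftest sequence number dynamic field; -1 until registered. */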
+int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
+
int rte_event_dev_selftest(uint8_t dev_id)
{
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
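+ /* Descriptor for the dynamic mbuf field carrying the selftest sequence number. */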
+ static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
+ .name = "rte_event_pmd_selftest_seqn_dynfield",
+ .size = sizeof(rte_event_pmd_selftest_seqn_t),
+ .align = __alignof__(rte_event_pmd_selftest_seqn_t),
+ };
struct rte_eventdev *dev = &rte_eventdevs[dev_id];
- if (dev->dev_ops->dev_selftest != NULL)
+ if (dev->dev_ops->dev_selftest != NULL) {
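+ /*
+  * Register the field on each run: rte_mbuf_dynfield_register()
+  * returns the existing offset if the field is already registered,
+  * so repeated selftest invocations are safe.
+  */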
+ rte_event_pmd_selftest_seqn_dynfield_offset =
+ rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
+ if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
+ return -ENOMEM;
return (*dev->dev_ops->dev_selftest)();
+ }
return -ENOTSUP;
}
#include <string.h>
#include <rte_common.h>
+#include <rte_compat.h>
#include <rte_config.h>
#include <rte_dev.h>
#include <rte_log.h>
#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_dyn.h>
#include "rte_eventdev.h"
#include "rte_event_timer_adapter_pmd.h"
*/
typedef int (*eventdev_selftest)(void);
+typedef uint32_t rte_event_pmd_selftest_seqn_t;
+extern int rte_event_pmd_selftest_seqn_dynfield_offset;
+
+/**
+ * Read test sequence number from mbuf.
+ *
+ * @param mbuf Structure to read from.
+ * @return pointer to test sequence number.
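+ *
+ * @note Only valid once rte_event_dev_selftest() has registered the
+ * dynamic field, i.e. from within a driver selftest callback.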
+ */
+__rte_internal
+static inline rte_event_pmd_selftest_seqn_t *
+rte_event_pmd_selftest_seqn(struct rte_mbuf *mbuf)
+{
+ return RTE_MBUF_DYNFIELD(mbuf,
+ rte_event_pmd_selftest_seqn_dynfield_offset,
+ rte_event_pmd_selftest_seqn_t *);
+}
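+/*
+ * Minimal usage sketch, mirroring the selftests above: stamp each mbuf
+ * before enqueue and compare the value read back after dequeue.
+ *
+ *	*rte_event_pmd_selftest_seqn(m) = i;
+ *	... enqueue m, later dequeue into ev ...
+ *	if (*rte_event_pmd_selftest_seqn(ev.mbuf) != i)
+ *		return -1;
+ */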
struct rte_cryptodev;
# added in 20.11
rte_event_pmd_pci_probe_named;
};
+
+INTERNAL {
+ global:
+
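+ # referenced by the inline rte_event_pmd_selftest_seqn() accessor,
+ # so the symbol must be visible to shared-library PMD builds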
+ rte_event_pmd_selftest_seqn_dynfield_offset;
+};