#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
-
#include <rte_eventdev.h>
+#include <rte_pause.h>
+
#include "test.h"
#define MAX_PORTS 16
/* Q creation */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = flags,
+ .schedule_type = flags,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
static inline int
create_atomic_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
}
static inline int
create_ordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
}
static inline int
create_unordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
}
static inline int
static const struct rte_event_queue_conf conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
};
for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
return 0;
}
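+/*
+ * Loop one event through a directed queue: the first enqueue is
+ * RTE_EVENT_OP_NEW, and every later iteration dequeues the event and
+ * re-enqueues it as RTE_EVENT_OP_FORWARD, exercising the port's new-event
+ * credit handling across 1000 forwards.
+ */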
+static int
+test_directed_forward_credits(struct test *t)
+{
+ uint32_t i;
+ int32_t err;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = 0,
+ };
+
+ for (i = 0; i < 1000; i++) {
+ err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err != 1) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_event_schedule(evdev);
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ /* re-write event to be a forward, and continue looping it */
+ ev.op = RTE_EVENT_OP_FORWARD;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
static int
test_priority_directed(struct test *t)
ret = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, xstats_names, ids, XSTATS_MAX);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
return -1;
}
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, ids, values, ret);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
return -1;
}
3 /* inflights */,
512 /* iq size */,
0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
- 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 3,
+ 0, 0,
};
for (i = 0; (signed int)i < ret; i++) {
if (queue_expected[i] != values[i]) {
3 /* inflight */,
512 /* iq size */,
0, 0, 0, 0, /* 4 iq used */
- 0, 0, 1, 0, /* qid to port pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
const uint32_t NUM_ITERS = 32;
for (i = 0; i < NUM_ITERS; i++) {
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
static const struct rte_event_queue_conf conf_lb_atomic = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
};
static const struct rte_event_queue_conf conf_single_link = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
};
if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
printf("%d: error creating qid\n", __LINE__);
goto fail;
/* num queue stats */
-#define NUM_Q_STATS 13
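+/* 13 previous stats plus the four new qid-to-port "packets" counters */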
+#define NUM_Q_STATS 17
/* queue offset from start of the devices whole xstats.
* This will break every time we add a statistic to a device/port/queue
*/
"qid_0_iq_2_used",
"qid_0_iq_3_used",
"qid_0_port_0_pinned_flows",
+ "qid_0_port_0_packets",
"qid_0_port_1_pinned_flows",
+ "qid_0_port_1_packets",
"qid_0_port_2_pinned_flows",
+ "qid_0_port_2_packets",
"qid_0_port_3_pinned_flows",
+ "qid_0_port_3_packets",
};
uint64_t queue_expected[] = {
7, /* rx */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 7,
+ 0, 0,
};
uint64_t queue_expected_zero[] = {
0, /* rx */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
RTE_DIM(queue_names) != NUM_Q_STATS) {
printf("%d : queue array of wrong size\n", __LINE__);
goto fail;
failed = 1;
}
if (val != queue_expected[i]) {
- printf("%d: %s value incorrect, expected %"PRIu64
- " got %d\n", __LINE__, queue_names[i],
- queue_expected[i], id);
+ printf("%d: %s (stat %d) value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__,
+ queue_names[i], i, queue_expected[i], val);
failed = 1;
}
/* reset to zero */
}
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
for (i = 0; i < 3; i++) {
/* Create QID */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
/* increase priority (0 == highest), as we go */
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
.nb_atomic_flows = 1024,
int i;
uint32_t deq_pkts, j;
struct rte_mbuf *mbufs[3];
- struct rte_mbuf *mbufs_out[3];
+ struct rte_mbuf *mbufs_out[3] = { 0 };
const uint32_t MAGIC_SEQN = 1234;
/* Create instance with 4 ports */
}
}
t->mbuf_pool = eventdev_func_mempool;
-
printf("*** Running Single Directed Packet test...\n");
ret = test_single_directed_packet(t);
if (ret != 0) {
printf("ERROR - Single Directed Packet test FAILED.\n");
return ret;
}
+ printf("*** Running Directed Forward Credit test...\n");
+ ret = test_directed_forward_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Directed Forward Credit test FAILED.\n");
+ return ret;
+ }
printf("*** Running Single Load Balanced Packet test...\n");
ret = single_packet(t);
if (ret != 0) {