true : false;
}
-static inline uint32_t
-evt_sched_type2queue_cfg(uint8_t sched_type)
-{
- uint32_t ret;
-
- switch (sched_type) {
- case RTE_SCHED_TYPE_ATOMIC:
- ret = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
- break;
- case RTE_SCHED_TYPE_ORDERED:
- ret = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
- break;
- case RTE_SCHED_TYPE_PARALLEL:
- ret = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
- break;
- default:
- rte_panic("Invalid sched_type %d\n", sched_type);
- }
- return ret;
-}
-
#endif /* _EVT_COMMON_*/
/* q0 (ordered queue) configuration */
struct rte_event_queue_conf q0_ordered_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
.nb_atomic_flows = opt->nb_flows,
.nb_atomic_order_sequences = opt->nb_flows,
};
/* q1 (atomic queue) configuration */
struct rte_event_queue_conf q1_atomic_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.nb_atomic_flows = opt->nb_flows,
.nb_atomic_order_sequences = opt->nb_flows,
};
};
/* queue configurations */
for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
- q_conf.event_queue_cfg = evt_sched_type2queue_cfg
- (opt->sched_type_list[queue % nb_stages]);
+ q_conf.schedule_type =
+ (opt->sched_type_list[queue % nb_stages]);
if (opt->q_priority) {
uint8_t stage_pos = queue % nb_stages;
RTE_SET_USED(queue_conf);
queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
- queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
- RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
+ RTE_SCHED_TYPE_PARALLEL;
queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
{
int type;
- /* SINGLE_LINK can be OR-ed with other types, so handle first */
+ type = conf->schedule_type;
+
if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
type = SW_SCHED_TYPE_DIRECT;
- } else {
- switch (conf->event_queue_cfg) {
- case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
- type = RTE_SCHED_TYPE_ATOMIC;
- break;
- case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
- type = RTE_SCHED_TYPE_ORDERED;
- break;
- case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
- type = RTE_SCHED_TYPE_PARALLEL;
- break;
- case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
- SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
- return -ENOTSUP;
- default:
- SW_LOG_ERR("Unknown queue type %d requested\n",
- conf->event_queue_cfg);
- return -EINVAL;
- }
+ } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ & conf->event_queue_cfg) {
+ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+ return -ENOTSUP;
}
struct sw_evdev *sw = sw_pmd_priv(dev);
static const struct rte_event_queue_conf default_conf = {
.nb_atomic_flows = 4096,
.nb_atomic_order_sequences = 1,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
};
static struct config_data cdata = {
.num_packets = (1L << 25), /* do ~32M packets */
.num_fids = 512,
- .queue_type = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .queue_type = RTE_SCHED_TYPE_ATOMIC,
.next_qid = {-1},
.qid = {-1},
.num_stages = 1,
cdata.enable_queue_priorities = 1;
break;
case 'o':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
break;
case 'p':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
break;
case 'q':
cdata.quiet = 1;
.new_event_threshold = 4096,
};
struct rte_event_queue_conf wkr_q_conf = {
- .event_queue_cfg = cdata.queue_type,
+ .schedule_type = cdata.queue_type,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
}
const char *type_str = "Atomic";
- switch (wkr_q_conf.event_queue_cfg) {
- case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
type_str = "Ordered";
break;
- case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ case RTE_SCHED_TYPE_PARALLEL:
type_str = "Parallel";
break;
}
printf("\tworkers: %u\n", cdata.num_workers);
printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
printf("\tqid0 type: ordered\n");
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
printf("\tqid0 type: atomic\n");
printf("\tCores available: %u\n", rte_lcore_count());
printf("\tCores used: %u\n", cores_needed);
{
if (queue_conf &&
!(queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ATOMIC)
))
return 1;
else
{
if (queue_conf &&
!(queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
- ((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
((queue_conf->event_queue_cfg &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK)
- == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ORDERED)
))
return 1;
else
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
* If this capability is not set, the queue only supports events of the
- * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ * *RTE_SCHED_TYPE_* type that it was created with.
*
- * @see RTE_EVENT_QUEUE_CFG_* values
+ * @see RTE_SCHED_TYPE_* values
*/
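A minimal sketch of probing this capability before choosing a queue configuration; the helper name, the dev_id parameter, the use of queue 0 and the error handling are illustrative assumptions, not part of this patch:

#include <rte_eventdev.h>

/* Prefer an "all types" queue when the device supports it, otherwise
 * fall back to a fixed per-queue schedule type.
 */
static int
setup_queue0(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	ret = rte_event_queue_default_conf_get(dev_id, 0, &qconf);
	if (ret < 0)
		return ret;

	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	else
		qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;

	return rte_event_queue_setup(dev_id, 0, &qconf);
}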
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue(forward,
/* Event queue specific APIs */
/* Event queue configuration bitmap flags */
-#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
-/**< Mask for event queue schedule type configuration request */
-#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
*
* @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
* @see rte_event_enqueue_burst()
*/
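A minimal sketch of what the flag buys: on an ALL_TYPES queue the schedule type travels in each event rather than in the queue configuration. The function name, dev_id, port_id, the queue id 0 and the mbuf-based flow id are assumptions for illustration:

#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>

static void
enqueue_to_all_types_queue(uint8_t dev_id, uint8_t port_id,
			   struct rte_mbuf *m)
{
	struct rte_event ev = {0};

	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = 0;                        /* an ALL_TYPES queue */
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED; /* chosen per event */
	ev.flow_id = m->hash.rss;
	ev.mbuf = m;

	/* The same queue also accepts ATOMIC or PARALLEL events. */
	while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
		rte_pause();
}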
-#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)
-/**< Allow only ATOMIC schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ATOMIC only and sched_type != RTE_SCHED_TYPE_ATOMIC
- *
- * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)
-/**< Allow only ORDERED schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with ORDERED only and sched_type != RTE_SCHED_TYPE_ORDERED
- *
- * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)
-/**< Allow only PARALLEL schedule type enqueue
- *
- * The rte_event_enqueue_burst() result is undefined if the queue configured
- * with PARALLEL only and sched_type != RTE_SCHED_TYPE_PARALLEL
- *
- * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
- */
-#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port.
*
* @see rte_event_port_setup(), rte_event_port_link()
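A minimal sketch of the single-link pattern, e.g. for a TX stage: the queue links to exactly one port. The function name and parameters are assumptions, and the consuming port is assumed to be set up already:

#include <rte_eventdev.h>

static int
setup_single_link_queue(uint8_t dev_id, uint8_t queue_id, uint8_t port_id)
{
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;

	ret = rte_event_queue_setup(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	/* Link the one and only consumer; NULL means normal priority. */
	ret = rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1);
	return ret == 1 ? 0 : -1;
}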
uint32_t nb_atomic_flows;
/**< The maximum number of active flows this queue can track at any
* given time. If the queue is configured for atomic scheduling (by
- * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES or
- * RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY flags to event_queue_cfg), then the
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
+ * or by setting schedule_type to RTE_SCHED_TYPE_ATOMIC), then the
* value must be in the range of [1, nb_event_queue_flows], which was
* previously provided in rte_event_dev_configure().
*/
* event will be returned from dequeue until one or more entries are
* freed up/released.
* If the queue is configured for ordered scheduling (by applying the
- * RTE_EVENT_QUEUE_CFG_ALL_TYPES or RTE_EVENT_QUEUE_CFG_ORDERED_ONLY
- * flags to event_queue_cfg), then the value must be in the range of
- * [1, nb_event_queue_flows], which was previously supplied to
- * rte_event_dev_configure().
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or by setting
+ * schedule_type to RTE_SCHED_TYPE_ORDERED), then the value must
+ * be in the range of [1, nb_event_queue_flows], which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg;
+ /**< Queue cfg flags (RTE_EVENT_QUEUE_CFG_*) */
+ uint8_t schedule_type;
+ /**< Queue schedule type (RTE_SCHED_TYPE_*).
+ * Only valid when the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag is not set in
+ * event_queue_cfg.
*/
- uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
uint8_t priority;
/**< Priority for this event queue relative to other event queues.
* The requested priority should in the range of
/* Negative cases */
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
qconf.nb_atomic_flows = info.max_event_queue_flows;
- qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
- RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
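For contrast with the negative cases above, a minimal sketch of a valid setup under the reworked config, where the schedule type is a field of its own and event_queue_cfg keeps only bitmap flags; the helper name and parameters are assumptions, and nb_flows is assumed to be within [1, nb_event_queue_flows]:

#include <rte_eventdev.h>

static int
setup_ordered_queue(uint8_t dev_id, uint8_t queue_id, uint32_t nb_flows)
{
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret < 0)
		return ret;

	qconf.event_queue_cfg = 0;                   /* no cfg flags */
	qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	qconf.nb_atomic_order_sequences = nb_flows;  /* reorder window */
	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}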
/* Assume PMD doesn't support atomic flows, return early */
return -ENOTSUP;
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY;
+ qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
/* Assume PMD doesn't support reordering */
return -ENOTSUP;
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
- qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
for (i = 0; i < (int)queue_count; i++) {
ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
/* Q creation */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = flags,
+ .schedule_type = flags,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
static inline int
create_atomic_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
}
static inline int
create_ordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
}
static inline int
create_unordered_qids(struct test *t, int num_qids)
{
- return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
+ return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
}
static inline int
const uint32_t NUM_ITERS = 32;
for (i = 0; i < NUM_ITERS; i++) {
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
static const struct rte_event_queue_conf conf_lb_atomic = {
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
};
}
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
for (i = 0; i < 3; i++) {
/* Create QID */
const struct rte_event_queue_conf conf = {
- .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
/* increase priority (0 == highest), as we go */
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
.nb_atomic_flows = 1024,