Always enable implicit release since explicit release is not
supported in the datapath.
The master lcore is used only for printing stats, so don't allocate
an event port for it.
Fix the service launch for event devices without distributed scheduling.
Fixes: bcb6f841d42a ("examples/l2fwd-event: setup service core")
Cc: stable@dpdk.org
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
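
Some context on the release semantics, not part of the patch: with
implicit release enabled, the PMD releases the previously dequeued
event context on the next dequeue, so the datapath never has to
enqueue RTE_EVENT_OP_RELEASE itself. A minimal port-setup sketch with
implicit release left on, assuming the pre-20.11 rte_event_port_conf
layout used here; the helper name is made up:

    #include <rte_eventdev.h>

    /* Keep implicit release enabled: the PMD releases the previous
     * event context on the next dequeue, so the datapath never
     * enqueues RTE_EVENT_OP_RELEASE explicitly.
     */
    static int
    setup_port_implicit_release(uint8_t dev_id, uint8_t port_id)
    {
            struct rte_event_port_conf port_conf;

            rte_event_port_default_conf_get(dev_id, port_id, &port_conf);
            port_conf.disable_implicit_release = 0;
            return rte_event_port_setup(dev_id, port_id, &port_conf);
    }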
int ret, i;
rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
- if (evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) {
+ if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
&service_id);
if (ret != -ESRCH && ret != 0)
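
The check above is inverted so the scheduler service is launched only
when the device lacks RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED. For
illustration, a sketch of the full launch sequence, assuming a service
lcore has already been added; the helper name and error handling are
made up:

    #include <rte_eventdev.h>
    #include <rte_service.h>

    static int
    launch_sched_service(uint8_t dev_id, uint32_t service_lcore)
    {
            struct rte_event_dev_info info;
            uint32_t service_id;
            int ret;

            rte_event_dev_info_get(dev_id, &info);
            if (info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)
                    return 0; /* device schedules internally */

            ret = rte_event_dev_service_id_get(dev_id, &service_id);
            if (ret == -ESRCH)
                    return 0; /* no scheduler service registered */
            if (ret)
                    return ret;

            /* Map the service to the service lcore and start both. */
            ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
            if (ret)
                    return ret;
            rte_service_runstate_set(service_id, 1);
            return rte_service_lcore_start(service_lcore);
    }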
/* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
- evt_rsrc->disable_implicit_release = !!(dev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+
+ /* Enable implicit release */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
event_d_conf.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth;
- num_workers = rte_lcore_count() - rte_service_lcore_count();
+ /* Ignore Master core and service cores. */
+ num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
if (dev_info.max_event_ports < num_workers)
num_workers = dev_info.max_event_ports;
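
In the generic path the master lcore only prints stats and the service
lcores run the scheduler, so neither gets a worker port. Restated as a
standalone helper for clarity (the function name is invented):

    #include <rte_lcore.h>
    #include <rte_service.h>

    /* Workers = all lcores minus the master (stats only) and the
     * service lcores (scheduler), capped by the device's port count.
     */
    static uint16_t
    generic_num_workers(uint8_t max_event_ports)
    {
            uint16_t num_workers;

            num_workers = rte_lcore_count() - 1 -
                          rte_service_lcore_count();
            if (max_event_ports < num_workers)
                    num_workers = max_event_ports;
            return num_workers;
    }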
.nb_event_port_enqueue_depth = 128
};
struct rte_event_dev_info dev_info;
- uint8_t disable_implicit_release;
const uint8_t event_d_id = 0; /* Always use first event device only */
uint32_t event_queue_cfg = 0;
uint16_t ethdev_count = 0;
/* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
- disable_implicit_release = !!(dev_info.event_dev_cap &
- RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
- evt_rsrc->disable_implicit_release =
- disable_implicit_release;
+ /* Enable implicit release */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
event_d_conf.nb_event_port_enqueue_depth =
dev_info.max_event_port_enqueue_depth;
- num_workers = rte_lcore_count();
+ /* Ignore Master core. */
+ num_workers = rte_lcore_count() - 1;
if (dev_info.max_event_ports < num_workers)
num_workers = dev_info.max_event_ports;
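
With implicit release enabled on every port, a worker's forward loop
needs no explicit release: the next dequeue releases the previous event
context. A bare-bones sketch, not the example's actual worker loop:

    #include <rte_eventdev.h>

    static void
    worker_loop(uint8_t dev_id, uint8_t port_id)
    {
            struct rte_event ev;

            while (1) {
                    if (!rte_event_dequeue_burst(dev_id, port_id,
                                                 &ev, 1, 0))
                            continue;
                    /* ... process ev.mbuf (MAC rewrite etc.) ... */
                    ev.op = RTE_EVENT_OP_FORWARD;
                    while (!rte_event_enqueue_burst(dev_id, port_id,
                                                    &ev, 1))
                            ; /* retry on backpressure */
            }
    }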