/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
-		struct rte_service_spec_impl *s)
+		struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;
	cs->service_active_on_lcore[i] = 1;
-	if (service_mt_safe(s) == 0) {
+	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
-	/* Atomically add this core to the mapped cores first, then examine if
-	 * we can run the service. This avoids a race condition between
-	 * checking the value, and atomically adding to the mapped count.
+	/* Increment num_mapped_cores to reflect that this core is
+	 * now mapped and capable of running the service.
	 */
-	if (serialize_mt_unsafe)
-		rte_atomic32_inc(&s->num_mapped_cores);
+	rte_atomic32_inc(&s->num_mapped_cores);
-	if (service_mt_safe(s) == 0 &&
-			rte_atomic32_read(&s->num_mapped_cores) > 1) {
-		if (serialize_mt_unsafe)
-			rte_atomic32_dec(&s->num_mapped_cores);
-		return -EBUSY;
-	}
-
-	int ret = service_run(id, cs, UINT64_MAX, s);
+	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
-	if (serialize_mt_unsafe)
-		rte_atomic32_dec(&s->num_mapped_cores);
+	rte_atomic32_dec(&s->num_mapped_cores);
	return ret;
}
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
-			service_run(i, cs, service_mask, service_get(i));
+			service_run(i, cs, service_mask, service_get(i), 1);
		}
		cs->loops++;
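
For orientation, the sketch below shows, in isolation, the serialization scheme this patch consolidates into service_run(): an MT-unsafe service is guarded by a per-service execute lock taken with a compare-and-swap, and the lock is only taken when the caller asks for serialization (serialize_mt_unsafe == 1). This is a minimal standalone sketch, not the DPDK implementation: it uses C11 atomics in place of the rte_atomic32 API, and the names toy_service, toy_service_run and hello are hypothetical.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct rte_service_spec_impl (illustration only). */
struct toy_service {
	int mt_safe;               /* 1 if the callback may run on many cores at once */
	atomic_uint execute_lock;  /* held while an MT-unsafe callback is running */
	void (*callback)(void);
};

/* Sketch of the post-patch control flow: the execute lock is taken only when
 * the service is MT-unsafe AND the caller requested serialization.
 */
static int32_t
toy_service_run(struct toy_service *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	if (!s->mt_safe && serialize_mt_unsafe) {
		unsigned int expected = 0;
		/* Analogous to rte_atomic32_cmpset() on s->execute_lock. */
		if (!atomic_compare_exchange_strong(&s->execute_lock, &expected, 1))
			return -EBUSY;   /* another core is already running it */
		s->callback();
		atomic_store(&s->execute_lock, 0);
		return 0;
	}

	/* MT-safe service, or the caller opted out of serialization. */
	s->callback();
	return 0;
}

static void hello(void) { puts("service ran"); }

int main(void)
{
	struct toy_service s = { .mt_safe = 0, .callback = hello };
	atomic_init(&s.execute_lock, 0);
	return toy_service_run(&s, 1) == 0 ? 0 : 1;
}

The effect visible in the diff: the dedicated service-lcore loop at the end always passes 1 and keeps its existing serialization, while the caller that takes a serialize_mt_unsafe argument (rte_service_run_on_app_lcore() in DPDK) simply forwards that flag and no longer duplicates the num_mapped_cores check around the call.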