#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_cycles.h>
-#include <rte_compat.h>
+#include <rte_function_versioning.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
#include <rte_pause.h>
+#include <rte_tailq.h>
-#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"
+#include "distributor_private.h"
TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
}
retptr64 = &(buf->retptr64[0]);
- /* Spin while handshake bits are set (scheduler clears it) */
- while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
+ /* Spin while handshake bits are set (scheduler clears it).
+ * Sync with distributor on GET_BUF flag.
+ */
+ while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
+ & RTE_DISTRIB_GET_BUF)) {
rte_pause();
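+ /* Set a TSC deadline ~100 ticks ahead, used to back off briefly
+ * before re-reading the handshake flag.
+ */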
uint64_t t = rte_rdtsc()+100;
/*
* Finally, set the GET_BUF to signal to distributor that cache
* line is ready for processing
+ * Sync with distributor to release retptrs
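+ * (the matching acquire load of retptr64[0] is done by the
+ * distributor in handle_returns())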
*/
- *retptr64 |= RTE_DISTRIB_GET_BUF;
+ __atomic_store_n(retptr64, *retptr64 | RTE_DISTRIB_GET_BUF,
+ __ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
return (pkts[0]) ? 1 : 0;
}
- /* If bit is set, return */
- if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
+ /* If the GET_BUF bit is still set, the distributor has not
+ * refilled the line yet, so return -1.
+ * Sync with distributor to acquire bufptrs
+ */
+ if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
+ & RTE_DISTRIB_GET_BUF)
return -1;
/* since bufptr64 is signed, this should be an arithmetic shift */
* so now we've got the contents of the cacheline into an array of
* mbuf pointers, so toggle the bit so scheduler can start working
* on the next cacheline while we're working.
+ * Sync with distributor on GET_BUF flag. Release bufptrs.
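+ * (the distributor observes this via its acquire loads of
+ * bufptr64[0] in release() and the process loop)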
*/
- buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;
+ __atomic_store_n(&(buf->bufptr64[0]),
+ buf->bufptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
return count;
}
return -EINVAL;
}
+ /* Sync with distributor to acquire retptrs */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
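+ /* (the fence ensures the distributor's store of 0 to retptr64[0]
+ * is visible before the return slots are rewritten below)
+ */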
for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
/* Switch off the return bit first */
buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
- /* set the GET_BUF but even if we got no returns */
- buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;
+ /* Set the GET_BUF bit even if we got no returns.
+ * Sync with distributor on GET_BUF flag. Release retptrs.
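+ * (handle_returns() on the distributor side acquire-loads this flag)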
+ */
+ __atomic_store_n(&(buf->retptr64[0]),
+ buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
return 0;
}
unsigned int count = 0;
unsigned int i;
- if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
+ /* Sync on GET_BUF flag. Acquire retptrs. */
+ if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
+ & RTE_DISTRIB_GET_BUF) {
for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
oldbuf = ((uintptr_t)(buf->retptr64[i] >>
}
d->returns.start = ret_start;
d->returns.count = ret_count;
- /* Clear for the worker to populate with more returns */
- buf->retptr64[0] = 0;
+ /* Clear for the worker to populate with more returns.
+ * Sync with worker on GET_BUF flag. Release retptrs.
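+ * (observed by the worker's acquire load of retptr64 in
+ * rte_distributor_request_pkt())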
+ */
+ __atomic_store_n(&(buf->retptr64[0]), 0, __ATOMIC_RELEASE);
}
return count;
}
struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
unsigned int i;
- while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+ /* Sync with worker on GET_BUF flag */
+ while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
+ & RTE_DISTRIB_GET_BUF))
rte_pause();
handle_returns(d, wkr);
d->backlog[wkr].count = 0;
- /* Clear the GET bit */
- buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
+ /* Clear the GET bit.
+ * Sync with worker on GET_BUF flag. Release bufptrs.
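+ * (the worker observes this via its acquire load of bufptr64[0]
+ * when polling for new packets)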
+ */
+ __atomic_store_n(&(buf->bufptr64[0]),
+ buf->bufptr64[0] & ~RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
return buf->count;
}
if (unlikely(num_mbufs == 0)) {
/* Flush out all non-full cache-lines to workers. */
for (wid = 0 ; wid < d->num_workers; wid++) {
- if (d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF) {
+ /* Sync with worker on GET_BUF flag. */
+ if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
release(d, wid);
handle_returns(d, wid);
}
uint16_t matches[RTE_DIST_BURST_SIZE];
unsigned int pkts;
- if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
+ /* Sync with worker on GET_BUF flag. */
+ if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
d->bufs[wkr].count = 0;
if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
/* Flush out all non-full cache-lines to workers. */
for (wid = 0 ; wid < d->num_workers; wid++)
- if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+ /* Sync with worker on GET_BUF flag. */
+ if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
+ __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
release(d, wid);
return num_mbufs;
while (total_outstanding(d) > 0)
rte_distributor_process(d, NULL, 0);
+ /* wait 10ms to allow all workers to drain the pkts */
+ rte_delay_us(10000);
+
/*
* Send empty burst to all workers to allow them to exit
* gracefully, should they need to.
/* throw away returns, so workers can exit */
for (wkr = 0; wkr < d->num_workers; wkr++)
- d->bufs[wkr].retptr64[0] = 0;
+ /* Sync with worker. Release retptrs. */
+ __atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
+ __ATOMIC_RELEASE);
}
BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
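+ /* Validate the parameters up front, before either allocation path
+ * runs: a NULL name cannot be used for the memzone, and num_workers
+ * must stay below both RTE_DISTRIB_MAX_WORKERS and RTE_MAX_LCORE.
+ */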
+ if (name == NULL || num_workers >=
+ (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
if (alg_type == RTE_DIST_ALG_SINGLE) {
d = malloc(sizeof(struct rte_distributor));
if (d == NULL) {
return d;
}
- if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
- rte_errno = EINVAL;
- return NULL;
- }
-
snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
if (mz == NULL) {
}
d = mz->addr;
- snprintf(d->name, sizeof(d->name), "%s", name);
+ strlcpy(d->name, name, sizeof(d->name));
d->num_workers = num_workers;
d->alg_type = alg_type;
rte_dist_burst_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
TAILQ_INSERT_TAIL(dist_burst_list, d, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return d;
}