rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
if (!tail) {
- rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to the data plane.
+ */
+ __atomic_store_n(
+ &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
+ cb, __ATOMIC_RELEASE);
} else {
while (tail->next)
tail = tail->next;
- tail->next = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to the data plane.
+ */
+ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
}
rte_spinlock_unlock(&rte_eth_rx_cb_lock);
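
The __ATOMIC_RELEASE store is the usual single-writer publication pattern: every store that initializes the callback object must become visible no later than the store of the pointer itself. Below is a minimal stand-alone sketch of that pattern, with a hypothetical node type and names that are not part of this patch (the patch additionally holds rte_eth_rx_cb_lock to serialize writers):

#include <stddef.h>

struct node {
	void (*fn)(void *);
	void *param;
	struct node *next;
};

static struct node *head;	/* read lock-free by data plane threads */

/* Writer: fully initialize the object, then release-publish it. */
static void publish(struct node *n, void (*fn)(void *), void *param)
{
	n->fn = fn;
	n->param = param;
	n->next = NULL;
	/* A reader that observes the new 'head' is guaranteed to also
	 * observe the three stores above. */
	__atomic_store_n(&head, n, __ATOMIC_RELEASE);
}
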
rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
if (!tail) {
- rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to the data plane.
+ */
+ __atomic_store_n(
+ &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
+ cb, __ATOMIC_RELEASE);
} else {
while (tail->next)
tail = tail->next;
- tail->next = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to the data plane.
+ */
+ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
}
rte_spinlock_unlock(&rte_eth_tx_cb_lock);
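
For context, applications reach both insertion paths through the public API; the sketch below registers a pass-through RX and TX callback on one queue. The callback bodies are placeholders and the prototypes are assumed to match the current rte_eth_add_rx_callback()/rte_eth_add_tx_callback() signatures:

#include <rte_ethdev.h>

static uint16_t
rx_cb_fn(uint16_t port __rte_unused, uint16_t queue __rte_unused,
	 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_rx,
	 uint16_t max_pkts __rte_unused, void *user_param __rte_unused)
{
	/* e.g. tag, count or filter received packets here */
	return nb_rx;
}

static uint16_t
tx_cb_fn(uint16_t port __rte_unused, uint16_t queue __rte_unused,
	 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_tx,
	 void *user_param __rte_unused)
{
	/* e.g. time-stamp packets about to be transmitted */
	return nb_tx;
}

static int
register_cbs(uint16_t port_id, uint16_t queue_id)
{
	/* Both calls take the spinlock-protected, release-publishing
	 * insertion paths shown in the hunks above. */
	if (rte_eth_add_rx_callback(port_id, queue_id, rx_cb_fn, NULL) == NULL)
		return -1;
	if (rte_eth_add_tx_callback(port_id, queue_id, tx_cb_fn, NULL) == NULL)
		return -1;
	return 0;
}
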
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- *prev_cb = cb->next;
+ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
ret = 0;
break;
}
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- *prev_cb = cb->next;
+ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
ret = 0;
break;
}
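
The unlink stores can be __ATOMIC_RELAXED because nothing new is published here: a concurrent reader either still sees cb (which stays valid, since it is not freed on this path) or already sees cb->next, which was release-published when it was inserted. For readers of the fragment, the store sits inside a spinlock-protected traversal; a sketch of the surrounding loop, reconstructed from the existing rte_eth_remove_rx_callback() structure (context, not new code in this patch):

rte_spinlock_lock(&rte_eth_rx_cb_lock);
prev_cb = &dev->post_rx_burst_cbs[queue_id];
for (; *prev_cb != NULL; prev_cb = &cb->next) {
	cb = *prev_cb;
	if (cb == user_cb) {
		/* Readers see either the old pointer (cb) or the new one
		 * (cb->next); both point at fully initialized nodes. */
		__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
		ret = 0;
		break;
	}
}
rte_spinlock_unlock(&rte_eth_rx_cb_lock);
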
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
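
One concrete reading of the added sentence: if user_param points at data that another thread also reads or writes, the callback and that thread must provide their own synchronization, since the ethdev layer only passes the pointer through. A minimal sketch with a hypothetical per-queue counter whose address is assumed to have been passed as user_param at registration time:

struct rx_stats {
	uint64_t pkts;
};

static struct rx_stats stats;	/* &stats is passed as user_param */

static uint16_t
count_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
	 struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_rx,
	 uint16_t max_pkts __rte_unused, void *user_param)
{
	struct rx_stats *s = user_param;

	/* Updated on the data plane, read from a stats thread: the user
	 * makes both sides atomic; ethdev does not synchronize this. */
	__atomic_fetch_add(&s->pkts, nb_rx, __ATOMIC_RELAXED);
	return nb_rx;
}

static uint64_t
read_stats(void)
{
	/* Stats/control thread side of the same agreement. */
	return __atomic_load_n(&stats.pkts, __ATOMIC_RELAXED);
}
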
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
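
When the control plane also changes the user data at run time, the same responsibility covers publication of those changes. A hedged sketch where user_param holds a pointer to a config object that is swapped while traffic is flowing (types and names are hypothetical):

struct tx_conf {
	uint16_t vlan_id;
};

struct cb_ctx {
	struct tx_conf *conf;	/* a struct cb_ctx * is passed as user_param */
};

/* Control plane: fully build the new config, then release-publish it.
 * Reclaiming the old config needs the same grace-period care as the
 * callback structure itself (see the removal notes below). */
static void
update_conf(struct cb_ctx *ctx, struct tx_conf *new_conf)
{
	__atomic_store_n(&ctx->conf, new_conf, __ATOMIC_RELEASE);
}

/* TX callback: acquire-load the current config before using it. */
static uint16_t
vlan_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
	struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_tx,
	void *user_param)
{
	struct cb_ctx *ctx = user_param;
	struct tx_conf *conf = __atomic_load_n(&ctx->conf, __ATOMIC_ACQUIRE);

	/* ... apply conf->vlan_id to the packets ... */
	(void)conf;
	return nb_tx;
}
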
* on that queue.
*
* - After a short delay - where the delay is sufficient to allow any
- * in-flight callbacks to complete.
+ * in-flight callbacks to complete. Alternatively, the RCU mechanism can be
+ * used to detect when data plane threads have ceased referencing the
+ * callback memory.
*
* @param port_id
* The port identifier of the Ethernet device.
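
A hedged sketch of the delay-based option described above; the one-second delay is an application-chosen value, not something mandated by the API, and cb is the pointer previously returned by rte_eth_add_rx_callback():

#include <unistd.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

static void
remove_and_free(uint16_t port_id, uint16_t queue_id,
		const struct rte_eth_rxtx_callback *cb)
{
	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) != 0)
		return;
	/* Data plane threads may still be inside an rte_eth_rx_burst()
	 * that loaded the old list head; give them time to drain. */
	sleep(1);
	rte_free((void *)(uintptr_t)cb);
}
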
* on that queue.
*
* - After a short delay - where the delay is sufficient to allow any
- * in-flight callbacks to complete.
+ * in-flight callbacks to complete. Alternatively, the RCU mechanism can be
+ * used to detect when data plane threads have ceased referencing the
+ * callback memory.
*
* @param port_id
* The port identifier of the Ethernet device.
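
And a sketch of the RCU alternative referred to above, using the librte_rcu QSBR API; a single data plane lcore with reader thread id 0 is assumed and error handling is omitted:

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_rcu_qsbr.h>

static struct rte_rcu_qsbr *qsv;

/* Control plane, once at startup: room for one registered reader. */
static void
rcu_setup(void)
{
	size_t sz = rte_rcu_qsbr_get_memsize(1);

	qsv = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	rte_rcu_qsbr_init(qsv, 1);
}

/* Data plane lcore, before entering its loop. */
static void
data_plane_register(void)
{
	rte_rcu_qsbr_thread_register(qsv, 0);
	rte_rcu_qsbr_thread_online(qsv, 0);
}

/* Data plane loop body: report a quiescent state once per iteration,
 * after the callback chain of the previous burst has finished. */
static void
data_plane_iteration(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	/* ... process and free nb_rx packets ... */
	(void)nb_rx;
	rte_rcu_qsbr_quiescent(qsv, 0);
}

/* Control plane removal: unlink, wait for one grace period, then free. */
static void
remove_cb_rcu(uint16_t port_id, uint16_t queue_id,
	      const struct rte_eth_rxtx_callback *cb)
{
	rte_eth_remove_rx_callback(port_id, queue_id, cb);
	rte_rcu_qsbr_synchronize(qsv, RTE_QSBR_THRID_INVALID);
	rte_free((void *)(uintptr_t)cb);
}
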
rx_pkts, nb_pkts);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
- struct rte_eth_rxtx_callback *cb =
- dev->post_rx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *cb;
+ /* __ATOMIC_RELEASE memory order was used when the
+ * callback was inserted into the list.
+ * Since there is a clear dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
+ __ATOMIC_RELAXED);
+
+ if (unlikely(cb != NULL)) {
do {
nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
nb_pkts, cb->param);
#endif
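
For readers following the truncated hunk: every later access goes through the pointer loaded above (cb->fn, cb->param, cb->next), so each of those loads carries an address dependency on it, and the CPU architectures DPDK supports preserve the ordering of dependent loads; that is what makes __ATOMIC_ACQUIRE unnecessary here. The unchanged remainder of the loop looks roughly like this (reconstruction for context, including the lines already shown, not part of the diff):

	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					  nb_pkts, cb->param);
			/* dependent load, ordered after the load of cb */
			cb = cb->next;
		} while (cb != NULL);
	}
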
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *cb;
+
+ /* __ATOMIC_RELEASE memory order was used when the
+ * callback was inserted into the list.
+ * Since there is a clear dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
+ __ATOMIC_RELAXED);
if (unlikely(cb != NULL)) {
do {