#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include <poll.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_spinlock.h>
+#include <rte_power_intrinsics.h>
#include "compat.h"
#define PF_XDP AF_XDP
#endif
-RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
#define AF_XDP_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, af_xdp_logtype, \
nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
if (nb_pkts == 0) {
- if (syscall_needed(&rxq->fq, rxq->busy_budget))
- recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
- MSG_DONTWAIT, NULL, NULL);
+ /* we can assume a kernel >= 5.11 is in use if busy polling is
+ * enabled and thus we can safely use the recvfrom() syscall
+ * which is only supported for AF_XDP sockets in kernels >=
+ * 5.11.
+ */
+ if (rxq->busy_budget) {
+ (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
+ MSG_DONTWAIT, NULL, NULL);
+ } else if (xsk_ring_prod__needs_wakeup(fq)) {
+ (void)poll(&rxq->fds[0], 1, 1000);
+ }
return 0;
}
if (nb_pkts == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
if (xsk_ring_prod__needs_wakeup(fq))
- recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
- MSG_DONTWAIT, NULL, NULL);
+ (void)poll(rxq->fds, 1, 1000);
#endif
return 0;
}
pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
- if (syscall_needed(&txq->tx, txq->pair->busy_budget))
+ if (tx_syscall_needed(&txq->tx))
while (send(xsk_socket__fd(txq->pair->xsk), NULL,
0, MSG_DONTWAIT) < 0) {
/* some thing unexpected */
if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
rte_pktmbuf_free(local_mbuf);
- kick_tx(txq, cq);
goto out;
}
tx_bytes += mbuf->pkt_len;
}
- kick_tx(txq, cq);
-
out:
xsk_ring_prod__submit(&txq->tx, count);
+ kick_tx(txq, cq);
txq->stats.tx_pkts += count;
txq->stats.tx_bytes += tx_bytes;
return 0;
}
+/* Index into pmc->opaque[] where the cached producer value is stored. */
+#define CLB_VAL_IDX 0
+
+/*
+ * Power-monitor comparison callback.
+ *
+ * @value  word read from the monitored address (the RX ring producer).
+ * @opaque opaque[CLB_VAL_IDX] holds the producer value cached when the
+ *         monitor condition was set up in eth_get_monitor_addr().
+ *
+ * Only the low 32 bits are compared, since the AF_XDP producer index is
+ * 32-bit. Returns 0 (keep sleeping) while the value is unchanged, -1 to
+ * abort entering the power-optimized state once new packets arrived.
+ */
+static int
+eth_monitor_callback(const uint64_t value,
+		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
+{
+	const uint64_t v = opaque[CLB_VAL_IDX];
+	const uint64_t m = (uint32_t)~0;
+
+	/* if the value has changed, abort entering power optimized state */
+	return (value & m) == v ? 0 : -1;
+}
+
+/*
+ * .get_monitor_addr ethdev callback: describe a power-monitor condition
+ * that wakes the core when the RX ring's producer index changes.
+ *
+ * @rx_queue RX queue handle (struct pkt_rx_queue *).
+ * @pmc      condition to fill: address to monitor, cached reference
+ *           value, comparison callback and access size.
+ *
+ * Always returns 0.
+ */
+static int
+eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+	struct pkt_rx_queue *rxq = rx_queue;
+	unsigned int *prod = rxq->rx.producer;
+	const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */
+
+	/* watch for changes in producer ring */
+	pmc->addr = (void *)prod;
+
+	/* store current value */
+	pmc->opaque[CLB_VAL_IDX] = cur_val;
+	pmc->fn = eth_monitor_callback;
+
+	/* AF_XDP producer ring index is 32-bit */
+	pmc->size = sizeof(uint32_t);
+
+	return 0;
+}
+
static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .get_monitor_addr = eth_get_monitor_addr,
};
/** parse busy_budget argument */