#include <unistd.h>
-#include "rte_pmd_ark.h"
#include "ark_ethdev_rx.h"
#include "ark_global.h"
#include "ark_logs.h"
#define ARK_RX_META_SIZE 32
#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
-#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+#define ARK_RX_MPU_CHUNK (64U)
/* Forward declarations */
struct ark_rx_queue;
struct rte_mbuf *mbuf0,
uint32_t cons_index);
static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
-static int eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
- uint32_t *pnb,
- struct rte_mbuf **mbufs);
/* ************************************************************************* */
struct ark_rx_queue {
	struct ark_udm_t *udm;
	struct ark_mpu_t *mpu;
+	/* Optional per-mbuf user-metadata callback copied from
+	 * ark->user_ext at queue setup; invoked from eth_ark_recv_pkts
+	 * with meta->user_meta when non-NULL.
+	 */
+	rx_user_meta_hook_fn rx_user_meta_hook;
+	void *ext_user_data;	/* opaque argument handed back to the hook */
+
+	/* Cached from the mempool at setup (data room minus headroom) so
+	 * the datapath and jumbo chaining avoid recomputing them per packet.
+	 */
+	uint32_t dataroom;
+	uint32_t headroom;
+
	uint32_t queue_size;
	uint32_t queue_mask;
	/* The queue Index is used within the dpdk device structures */
	uint16_t queue_index;
-	uint32_t last_cons;
+	/* padding where last_cons was removed — presumably kept to preserve
+	 * the struct layout; TODO confirm layout sensitivity before reuse.
+	 */
+	uint32_t unused;
-	/* separate cache line */
-	/* second cache line - fields only used in slow path */
+	/* next cache line - fields written by device */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;
	volatile uint32_t prod_index;	/* step 2 filled by FPGA */
} __rte_cache_aligned;
-
/* ************************************************************************* */
static int
eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
{
	queue->cons_index = cons_index;
-	eth_ark_rx_seed_mbufs(queue);
-	if (((cons_index - queue->last_cons) >= 64U)) {
-		queue->last_cons = cons_index;
+	/* Batch the reseed and FPGA doorbell: only when at least one MPU
+	 * chunk (ARK_RX_MPU_CHUNK) of ring slots is free — i.e. the gap
+	 * between consumed and seeded entries, mod ring size — do we
+	 * replenish mbufs and publish the new producer index. This replaces
+	 * the old last_cons tracking with a computation on seed_index.
+	 */
+	if ((cons_index + queue->queue_size - queue->seed_index) >= ARK_RX_MPU_CHUNK) {
+		eth_ark_rx_seed_mbufs(queue);
		ark_mpu_set_producer(queue->mpu, queue->seed_index);
	}
}
/* NOTE zmalloc is used, no need to 0 indexes, etc. */
queue->mb_pool = mb_pool;
+ queue->dataroom = rte_pktmbuf_data_room_size(mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ queue->headroom = RTE_PKTMBUF_HEADROOM;
queue->phys_qid = qidx;
queue->queue_index = queue_idx;
queue->queue_size = nb_desc;
queue->queue_mask = nb_desc - 1;
+ queue->rx_user_meta_hook = ark->user_ext.rx_user_meta_hook;
+ queue->ext_user_data = ark->user_data[dev->data->port_id];
queue->reserve_q =
rte_zmalloc_socket("Ark_rx_queue mbuf",
nb_desc * sizeof(struct rte_mbuf *),
- 64,
+ 512,
socket_id);
queue->paddress_q =
rte_zmalloc_socket("Ark_rx_queue paddr",
nb_desc * sizeof(rte_iova_t),
- 64,
+ 512,
socket_id);
if (queue->reserve_q == 0 || queue->paddress_q == 0) {
queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+ /* Configure UDM per queue */
+ ark_udm_stop(queue->udm, 0);
+ ark_udm_configure(queue->udm,
+ RTE_PKTMBUF_HEADROOM,
+ queue->dataroom,
+ ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(queue->udm);
+ ark_udm_stop(queue->udm, 0);
+
/* populate mbuf reserve */
status = eth_ark_rx_seed_mbufs(queue);
return 0;
}
-/* ************************************************************************* */
-uint16_t
-eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
- struct rte_mbuf **rx_pkts __rte_unused,
- uint16_t nb_pkts __rte_unused)
-{
- return 0;
-}
-
/* ************************************************************************* */
uint16_t
eth_ark_recv_pkts(void *rx_queue,
struct ark_rx_queue *queue;
register uint32_t cons_index, prod_index;
uint16_t nb;
+ uint16_t i;
struct rte_mbuf *mbuf;
+ struct rte_mbuf **pmbuf;
struct ark_rx_meta *meta;
+ rx_user_meta_hook_fn rx_user_meta_hook;
queue = (struct ark_rx_queue *)rx_queue;
if (unlikely(queue == 0))
return 0;
prod_index = queue->prod_index;
cons_index = queue->cons_index;
+ if (prod_index == cons_index)
+ return 0;
nb = 0;
while (prod_index != cons_index) {
/* META DATA embedded in headroom */
meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
- mbuf->port = meta->port;
mbuf->pkt_len = meta->pkt_len;
mbuf->data_len = meta->pkt_len;
- mbuf->timestamp = meta->timestamp;
- rte_pmd_ark_mbuf_rx_userdata_set(mbuf, meta->user_data);
if (ARK_DEBUG_CORE) { /* debug sanity checks */
+
if ((meta->pkt_len > (1024 * 16)) ||
(meta->pkt_len == 0)) {
ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
mbuf->pkt_len = 63;
meta->pkt_len = 63;
}
- /* seqn is only set under debug */
- mbuf->seqn = cons_index;
}
- if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+ if (unlikely(meta->pkt_len > queue->dataroom))
cons_index = eth_ark_rx_jumbo
(queue, meta, mbuf, cons_index + 1);
else
break;
}
- if (unlikely(nb != 0))
- /* report next free to FPGA */
- eth_ark_rx_update_cons_index(queue, cons_index);
+ rx_user_meta_hook = queue->rx_user_meta_hook;
+ for (pmbuf = rx_pkts, i = 0; rx_user_meta_hook && i < nb; i++) {
+ mbuf = *pmbuf++;
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+ rx_user_meta_hook(mbuf, meta->user_meta, queue->ext_user_data);
+ }
+
+ eth_ark_rx_update_cons_index(queue, cons_index);
return nb;
}
/* first buf populated by called */
mbuf_prev = mbuf0;
segments = 1;
- data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ data_len = RTE_MIN(meta->pkt_len, queue->dataroom);
remaining = meta->pkt_len - data_len;
mbuf0->data_len = data_len;
/* HW guarantees that the data does not exceed prod_index! */
while (remaining != 0) {
data_len = RTE_MIN(remaining,
- RTE_MBUF_DEFAULT_DATAROOM +
- RTE_PKTMBUF_HEADROOM);
+ queue->dataroom);
remaining -= data_len;
segments += 1;
mbuf_prev->next = mbuf;
mbuf_prev = mbuf;
mbuf->data_len = data_len;
- mbuf->data_off = 0;
- if (ARK_DEBUG_CORE)
- mbuf->seqn = cons_index; /* for debug only */
cons_index += 1;
}
}
uint32_t
-eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+/* ethdev rx_queue_count callback. Signature follows the DPDK API change
+ * that passes the queue pointer directly instead of (dev, queue_id).
+ * Returns the number of descriptors the FPGA has produced but the host
+ * has not yet consumed.
+ */
+eth_ark_dev_rx_queue_count(void *rx_queue)
{
	struct ark_rx_queue *queue;
-	queue = dev->data->rx_queues[queue_id];
+	queue = rx_queue;
	return (queue->prod_index - queue->cons_index); /* mod arith */
}
static inline int
eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
{
- uint32_t limit = queue->cons_index + queue->queue_size;
+ uint32_t limit = (queue->cons_index & ~(ARK_RX_MPU_CHUNK - 1)) +
+ queue->queue_size;
uint32_t seed_index = queue->seed_index;
uint32_t count = 0;
int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
if (unlikely(status != 0)) {
- /* Try to recover from lack of mbufs in pool */
- status = eth_ark_rx_seed_recovery(queue, &nb, mbufs);
- if (unlikely(status != 0)) {
- return -1;
- }
+ ARK_PMD_LOG(NOTICE,
+ "Could not allocate %u mbufs from pool"
+ " for RX queue %u;"
+ " %u free buffers remaining in queue\n",
+ nb, queue->queue_index,
+ queue->seed_index - queue->cons_index);
+ return -1;
}
if (ARK_DEBUG_CORE) { /* DEBUG */
return 0;
}
-int
-eth_ark_rx_seed_recovery(struct ark_rx_queue *queue,
- uint32_t *pnb,
- struct rte_mbuf **mbufs)
-{
- int status = -1;
-
- /* Ignore small allocation failures */
- if (*pnb <= 64)
- return -1;
-
- *pnb = 64U;
- status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb);
- if (status != 0) {
- ARK_PMD_LOG(NOTICE,
- "ARK: Could not allocate %u mbufs from pool for RX queue %u;"
- " %u free buffers remaining in queue\n",
- *pnb, queue->queue_index,
- queue->seed_index - queue->cons_index);
- }
- return status;
-}
-
void
eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
const char *msg)
ark_mpu_start(queue->mpu);
/* Add some buffers */
- index = 100000 + queue->seed_index;
+ index = ARK_RX_MPU_CHUNK + queue->seed_index;
ark_mpu_set_producer(queue->mpu, index);
}
/* Wait to allow data to pass */
usleep(100);
- ARK_PMD_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
- ark_udm_is_flushed(ark->udm.v));
+ ARK_PMD_LOG(NOTICE, "UDM forced flush attempt, stopped = %d\n",
+ ark_udm_is_flushed(ark->udm.v));
}
ark_udm_reset(ark->udm.v);
}
{
uint16_t i, j;
- ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n",
- mbuf, mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+ ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
+ mbuf, mbuf->pkt_len, mbuf->data_off);
for (i = lo; i < hi; i += 16) {
uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);