/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2018 Atomic Rules LLC
 */

#include <unistd.h>

#include "ark_ethdev_rx.h"
#include "ark_global.h"
#include "ark_logs.h"
#include "ark_mpu.h"
#include "ark_udm.h"

#define ARK_RX_META_SIZE 32
#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)

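/*
 * Layout sketch (illustrative addition, not from the original source):
 * the hardware deposits ARK_RX_META_SIZE bytes of per-packet meta data
 * at the tail of the mbuf headroom, immediately ahead of the packet
 * data.  With the default RTE_PKTMBUF_HEADROOM of 128 bytes:
 *
 *	buf_addr +   0 .. +95	unused headroom
 *	buf_addr +  96 .. +127	struct ark_rx_meta (ARK_RX_META_OFFSET)
 *	buf_addr + 128 .. 	packet data (RTE_PKTMBUF_HEADROOM)
 */
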
/* Forward declarations */
struct ark_rx_queue;
struct ark_rx_meta;

static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
				 struct ark_rx_meta *meta,
				 struct rte_mbuf *mbuf0,
				 uint32_t cons_index);
static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);

/* ************************************************************************* */
struct ark_rx_queue {
	/* array of mbufs to populate */
	struct rte_mbuf **reserve_q;
	/* array of physical (IOVA) addresses of the mbuf data buffers */
	/* This pointer is a virtual address */
	rte_iova_t *paddress_q;
	struct rte_mempool *mb_pool;

	struct ark_udm_t *udm;
	struct ark_mpu_t *mpu;

	rx_user_meta_hook_fn rx_user_meta_hook;
	void *ext_user_data;

	uint32_t dataroom;
	uint32_t headroom;

	uint32_t queue_size;
	uint32_t queue_mask;

	uint32_t seed_index;		/* step 1: set with empty mbuf */
	uint32_t cons_index;		/* step 3: consumed by driver */

	/* The queue Id is used to identify the HW Q */
	uint16_t phys_qid;

	/* The queue Index is used within the dpdk device structures */
	uint16_t queue_index;

	/* next cache line - fields written by device */
	RTE_MARKER cacheline1 __rte_cache_min_aligned;

	volatile uint32_t prod_index;	/* step 2: filled by FPGA */
} __rte_cache_aligned;

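/*
 * Illustrative helper (an addition, not part of the original driver;
 * the name is hypothetical): seed_index, cons_index and prod_index are
 * free-running uint32_t counters.  Only (index & queue_mask) selects a
 * ring slot; differences are taken modulo 2^32, so wraparound of the
 * counters themselves is harmless.
 */
static inline uint32_t __rte_unused
eth_ark_rx_slots_outstanding(const struct ark_rx_queue *queue)
{
	/* mbufs handed to the HW (seeded) and not yet consumed */
	return queue->seed_index - queue->cons_index;
}
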
/* ************************************************************************* */
static int
eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
		    struct ark_rx_queue *queue,
		    uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
{
	rte_iova_t queue_base;
	rte_iova_t phys_addr_q_base;
	rte_iova_t phys_addr_prod_index;

	queue_base = rte_malloc_virt2iova(queue);
	phys_addr_prod_index = queue_base +
		offsetof(struct ark_rx_queue, prod_index);

	phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);

	/* Verify HW */
	if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
		ARK_PMD_LOG(ERR, "Illegal configuration rx queue\n");
		return -1;
	}

	/* Stop, reset, and configure the MPU with the address ring */
	ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);

	/* Give the UDM the IOVA of prod_index so the HW can update it */
	ark_udm_write_addr(queue->udm, phys_addr_prod_index);

	/* advance the valid pointer, but don't start until the queue starts */
	ark_mpu_reset_stats(queue->mpu);

	/* The seed is the producer index for the HW */
	ark_mpu_set_producer(queue->mpu, queue->seed_index);
	dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static inline void
eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
{
	queue->cons_index = cons_index;
	if ((cons_index + queue->queue_size - queue->seed_index) >= 64U) {
		eth_ark_rx_seed_mbufs(queue);
		ark_mpu_set_producer(queue->mpu, queue->seed_index);
	}
}

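/*
 * Worked example (illustrative addition): cons_index + queue_size -
 * seed_index is the number of ring slots consumed but not yet
 * reseeded.  With queue_size 512, seed_index 600 and cons_index 200,
 * that is 200 + 512 - 600 = 112 >= 64, so at least 64 slots are empty
 * and a bulk refill is triggered.
 */
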
/* ************************************************************************* */
int
eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   const struct rte_eth_rxconf *rx_conf,
			   struct rte_mempool *mb_pool)
{
	static int warning1;		/* = 0 */
	struct ark_adapter *ark = dev->data->dev_private;

	struct ark_rx_queue *queue;
	uint32_t i;
	int status;

	int qidx = queue_idx;

	/* We may already be set up; free memory prior to re-allocation */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	if (rx_conf != NULL && warning1 == 0) {
		warning1 = 1;
		ARK_PMD_LOG(NOTICE,
			    "Arkville ignores rte_eth_rxconf argument.\n");
	}

	if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
		ARK_PMD_LOG(ERR,
			    "Error: DPDK Arkville requires head room >= %d bytes (%s)\n",
			    ARK_RX_META_SIZE, __func__);
		return -1;		/* ERROR CODE */
	}

	if (!rte_is_power_of_2(nb_desc)) {
		ARK_PMD_LOG(ERR,
			    "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
			    nb_desc, __func__);
		return -1;		/* ERROR CODE */
	}

	/* Allocate queue struct */
	queue = rte_zmalloc_socket("Ark_rxqueue",
				   sizeof(struct ark_rx_queue),
				   64,
				   socket_id);
	if (queue == 0) {
		ARK_PMD_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	/* NOTE zmalloc is used, no need to 0 indexes, etc. */
	queue->mb_pool = mb_pool;
	queue->dataroom = rte_pktmbuf_data_room_size(mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	queue->headroom = RTE_PKTMBUF_HEADROOM;
	queue->phys_qid = qidx;
	queue->queue_index = queue_idx;
	queue->queue_size = nb_desc;
	queue->queue_mask = nb_desc - 1;
	queue->rx_user_meta_hook = ark->user_ext.rx_user_meta_hook;
	queue->ext_user_data = ark->user_data[dev->data->port_id];

180 rte_zmalloc_socket("Ark_rx_queue mbuf",
181 nb_desc * sizeof(struct rte_mbuf *),
185 rte_zmalloc_socket("Ark_rx_queue paddr",
186 nb_desc * sizeof(rte_iova_t),
190 if (queue->reserve_q == 0 || queue->paddress_q == 0) {
192 "Failed to allocate queue memory in %s\n",
194 rte_free(queue->reserve_q);
195 rte_free(queue->paddress_q);
	dev->data->rx_queues[queue_idx] = queue;
	queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
	queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);

	/* Configure UDM per queue */
	ark_udm_stop(queue->udm, 0);
	ark_udm_configure(queue->udm,
			  RTE_PKTMBUF_HEADROOM,
			  queue->dataroom,
			  ARK_RX_WRITE_TIME_NS);
	ark_udm_stats_reset(queue->udm);
	ark_udm_stop(queue->udm, 0);

	/* populate mbuf reserve */
	status = eth_ark_rx_seed_mbufs(queue);

	if (queue->seed_index != nb_desc) {
		ARK_PMD_LOG(ERR, "Failed to allocate %u mbufs for RX queue %d\n",
			    nb_desc, qidx);
		status = -1;
	}
	/* MPU Setup */
	if (status == 0)
		status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);

	if (unlikely(status != 0)) {
		struct rte_mbuf **mbuf;

		ARK_PMD_LOG(ERR, "Failed to initialize RX queue %d %s\n",
			    qidx, __func__);
		/* Free the mbufs allocated */
		for (i = 0, mbuf = queue->reserve_q;
		     i < queue->seed_index; ++i, mbuf++) {
			rte_pktmbuf_free(*mbuf);
		}
		rte_free(queue->reserve_q);
		rte_free(queue->paddress_q);
		rte_free(queue);
		return -1;		/* ERROR CODE */
	}

	return 0;
}

/* ************************************************************************* */
uint16_t
eth_ark_recv_pkts(void *rx_queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t nb_pkts)
{
	struct ark_rx_queue *queue;
	register uint32_t cons_index, prod_index;
	uint16_t nb;
	uint16_t i;
	struct rte_mbuf *mbuf;
	struct rte_mbuf **pmbuf;
	struct ark_rx_meta *meta;
	rx_user_meta_hook_fn rx_user_meta_hook;

	queue = (struct ark_rx_queue *)rx_queue;
	if (unlikely(queue == 0))
		return 0;
	if (unlikely(nb_pkts == 0))
		return 0;
	prod_index = queue->prod_index;
	cons_index = queue->cons_index;
	if (prod_index == cons_index)
		return 0;
	nb = 0;

	while (prod_index != cons_index) {
		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
		/* prefetch mbuf */
		rte_mbuf_prefetch_part1(mbuf);
		rte_mbuf_prefetch_part2(mbuf);

		/* META DATA embedded in headroom */
		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);

		mbuf->pkt_len = meta->pkt_len;
		mbuf->data_len = meta->pkt_len;

		if (ARK_DEBUG_CORE) {	/* debug sanity checks */
			if ((meta->pkt_len > (1024 * 16)) ||
			    (meta->pkt_len == 0)) {
				ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
					    " cons: %" PRIU32
					    " prod: %" PRIU32
					    " seed_index %" PRIU32
					    "\n",
					    queue->phys_qid,
					    cons_index,
					    queue->prod_index,
					    queue->seed_index);

				ARK_PMD_LOG(DEBUG, "       : UDM"
					    " prod: %" PRIU32
					    " len: %u\n",
					    queue->udm->rt_cfg.prod_idx,
					    meta->pkt_len);
				ark_mpu_dump(queue->mpu,
					     "    ",
					     queue->phys_qid);
				dump_mbuf_data(mbuf, 0, 256);
				/* it's FUBAR, so force a sane length */
				mbuf->pkt_len = 63;
				meta->pkt_len = 63;
			}
		}

		if (unlikely(meta->pkt_len > queue->dataroom))
			cons_index = eth_ark_rx_jumbo
				(queue, meta, mbuf, cons_index + 1);
		else
			cons_index += 1;

		rx_pkts[nb] = mbuf;
		nb++;
		if (nb >= nb_pkts)
			break;
	}

	rx_user_meta_hook = queue->rx_user_meta_hook;
	for (pmbuf = rx_pkts, i = 0; rx_user_meta_hook && i < nb; i++) {
		mbuf = *pmbuf++;
		meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
		rx_user_meta_hook(mbuf, meta->user_meta, queue->ext_user_data);
	}

	eth_ark_rx_update_cons_index(queue, cons_index);

	return nb;
}

/* ************************************************************************* */
static uint32_t
eth_ark_rx_jumbo(struct ark_rx_queue *queue,
		 struct ark_rx_meta *meta,
		 struct rte_mbuf *mbuf0,
		 uint32_t cons_index)
{
	struct rte_mbuf *mbuf_prev;
	struct rte_mbuf *mbuf;

	uint16_t remaining;
	uint16_t data_len;
	uint16_t segments;

	/* first buf populated by caller */
	mbuf_prev = mbuf0;
	segments = 1;
	data_len = RTE_MIN(meta->pkt_len, queue->dataroom);
	remaining = meta->pkt_len - data_len;
	mbuf0->data_len = data_len;

	/* HW guarantees that the data does not exceed prod_index! */
	while (remaining != 0) {
		data_len = RTE_MIN(remaining,
				   queue->dataroom);

		remaining -= data_len;
		segments += 1;

		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
		mbuf_prev->next = mbuf;
		mbuf_prev = mbuf;
		mbuf->data_len = data_len;

		cons_index += 1;
	}

	mbuf0->nb_segs = segments;
	return cons_index;
}

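/*
 * Worked example (illustrative addition): with meta->pkt_len 6000 and
 * dataroom 2048, mbuf0 carries 2048 bytes and remaining is 3952; two
 * more ring mbufs carry 2048 and 1904 bytes, giving nb_segs == 3 and
 * 2048 + 2048 + 1904 == 6000.
 */
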
/* Drain the internal queue allowing hw to clear out. */
static void
eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
{
	register uint32_t cons_index;
	struct rte_mbuf *mbuf;

	cons_index = queue->cons_index;

	/* NOT performance optimized, since this is a one-shot call */
	while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
		mbuf = queue->reserve_q[cons_index & queue->queue_mask];
		rte_pktmbuf_free(mbuf);
		cons_index++;
		eth_ark_rx_update_cons_index(queue, cons_index);
	}
}

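/*
 * Illustrative note (an addition): the drain condition
 * (cons_index ^ prod_index) & queue_mask is nonzero exactly when the
 * two free-running counters select different ring slots.  E.g. with
 * queue_mask 0x1ff, cons_index 0x2f0 and prod_index 0x310:
 * (0x2f0 ^ 0x310) & 0x1ff = 0x1e0 != 0, so the slot at
 * 0x2f0 & 0x1ff is freed and cons_index advances.
 */
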
uint32_t
eth_ark_dev_rx_queue_count(void *rx_queue)
{
	struct ark_rx_queue *queue;

	queue = rx_queue;
	return (queue->prod_index - queue->cons_index); /* mod arith */
}

/* ************************************************************************* */
int
eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_rx_queue *queue;

	queue = dev->data->rx_queues[queue_id];
	if (queue == 0)
		return -1;

	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Hand the current seed index to the MPU, then start MPU and UDM */
	ark_mpu_set_producer(queue->mpu, queue->seed_index);
	ark_mpu_start(queue->mpu);

	ark_udm_queue_enable(queue->udm, 1);

	return 0;
}

/* ************************************************************************* */

/* Queue can be restarted; data remains in place.
 */
int
eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ark_rx_queue *queue;

	queue = dev->data->rx_queues[queue_id];
	if (queue == 0)
		return -1;

	ark_udm_queue_enable(queue->udm, 0);

	dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/* ************************************************************************* */
static inline int
eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
{
	uint32_t limit = queue->cons_index + queue->queue_size;
	uint32_t seed_index = queue->seed_index;

	uint32_t count = 0;
	uint32_t seed_m = queue->seed_index & queue->queue_mask;

	uint32_t nb = limit - seed_index;

	/* Handle wrap around -- remainder is filled on the next call */
	if (unlikely(seed_m + nb > queue->queue_size))
		nb = queue->queue_size - seed_m;

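	/*
	 * Worked example (illustrative addition): with queue_size 512
	 * (queue_mask 511), cons_index 768 and seed_index 1000:
	 * limit = 768 + 512 = 1280, nb = 1280 - 1000 = 280 and
	 * seed_m = 1000 & 511 = 488.  Since 488 + 280 > 512, nb is
	 * clipped to 512 - 488 = 24; this call fills slots 488..511
	 * and the remainder is filled after the ring wraps.
	 */
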
	struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
	int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);

	if (unlikely(status != 0)) {
		ARK_PMD_LOG(NOTICE,
			    "Could not allocate %u mbufs from pool"
			    " for RX queue %u;"
			    " %u free buffers remaining in queue\n",
			    nb, queue->queue_index,
			    queue->seed_index - queue->cons_index);
		return -1;
	}

	if (ARK_DEBUG_CORE) {		/* DEBUG */
		/* Poison each buffer and tag it with seed index and HW qid */
		while (count != nb) {
			struct rte_mbuf *mbuf_init =
				queue->reserve_q[seed_m + count];

			memset(mbuf_init->buf_addr, -1, 512);
			*((uint32_t *)mbuf_init->buf_addr) =
				seed_index + count;
			*(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
				queue->phys_qid;
			count++;
		}
		count = 0;
	} /* DEBUG */

	queue->seed_index += nb;

	/* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
	switch (nb % 4) {
	case 0:
		while (count != nb) {
			queue->paddress_q[seed_m++] =
				(*mbufs++)->buf_iova;
			count++;
			/* FALLTHROUGH */
	case 3:
			queue->paddress_q[seed_m++] =
				(*mbufs++)->buf_iova;
			count++;
			/* FALLTHROUGH */
	case 2:
			queue->paddress_q[seed_m++] =
				(*mbufs++)->buf_iova;
			count++;
			/* FALLTHROUGH */
	case 1:
			queue->paddress_q[seed_m++] =
				(*mbufs++)->buf_iova;
			count++;
			/* FALLTHROUGH */
		} /* while (count != nb) */
	} /* switch */

	return 0;
}

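/*
 * For reference (illustrative addition): the Duff's device above is
 * behaviorally equivalent to the plain loop below; the unrolled form
 * just amortizes the loop test over four copies of the body.
 *
 *	while (count != nb) {
 *		queue->paddress_q[seed_m++] = (*mbufs++)->buf_iova;
 *		count++;
 *	}
 */
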
void
eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
		      const char *msg)
{
	struct ark_rx_queue *queue;

	queue = dev->data->rx_queues[queue_id];

	ark_ethdev_rx_dump(msg, queue);
}

/* ************************************************************************* */
/* Called on device close; no user API, queue is stopped */
void
eth_ark_dev_rx_queue_release(void *vqueue)
{
	struct ark_rx_queue *queue;
	uint32_t i;

	queue = (struct ark_rx_queue *)vqueue;
	if (queue == 0)
		return;

	ark_udm_queue_enable(queue->udm, 0);
	/* Stop the MPU since the pointers are going away */
	ark_mpu_stop(queue->mpu);

	/* Need to clear out mbufs here, dropping packets along the way */
	eth_ark_rx_queue_drain(queue);

	for (i = 0; i < queue->queue_size; ++i)
		rte_pktmbuf_free(queue->reserve_q[i]);

	rte_free(queue->reserve_q);
	rte_free(queue->paddress_q);
	rte_free(queue);
}

void
eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
	struct ark_rx_queue *queue;
	struct ark_udm_t *udm;

	queue = vqueue;
	if (queue == 0)
		return;
	udm = queue->udm;

	uint64_t ibytes = ark_udm_bytes(udm);
	uint64_t ipackets = ark_udm_packets(udm);
	uint64_t idropped = ark_udm_dropped(udm);

	stats->q_ipackets[queue->queue_index] = ipackets;
	stats->q_ibytes[queue->queue_index] = ibytes;
	stats->q_errors[queue->queue_index] = idropped;
	stats->ipackets += ipackets;
	stats->ibytes += ibytes;
	stats->imissed += idropped;
}

void
eth_rx_queue_stats_reset(void *vqueue)
{
	struct ark_rx_queue *queue;

	queue = vqueue;
	if (queue == 0)
		return;

	ark_mpu_reset_stats(queue->mpu);
	ark_udm_queue_stats_reset(queue->udm);
}

void
eth_ark_udm_force_close(struct rte_eth_dev *dev)
{
	struct ark_adapter *ark = dev->data->dev_private;
	struct ark_rx_queue *queue;
	uint32_t index;
	uint16_t i;

	if (!ark_udm_is_flushed(ark->udm.v)) {
		/* restart the MPUs */
		ARK_PMD_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
			if (queue == 0)
				continue;

			ark_mpu_start(queue->mpu);
			/* Add some buffers */
			index = 100000 + queue->seed_index;
			ark_mpu_set_producer(queue->mpu, index);
		}
		/* Wait to allow data to pass */
		usleep(100);

		ARK_PMD_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
			    ark_udm_is_flushed(ark->udm.v));
	}
	ark_udm_reset(ark->udm.v);
}

static void
ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
{
	if (queue == NULL)
		return;
	ARK_PMD_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
	ARK_PMD_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
		    "queue_size", queue->queue_size,
		    "seed_index", queue->seed_index,
		    "prod_index", queue->prod_index,
		    "cons_index", queue->cons_index);

	ark_mpu_dump(queue->mpu, name, queue->phys_qid);
	ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
	ark_udm_dump(queue->udm, name);
	ark_udm_dump_setup(queue->udm, queue->phys_qid);
}

/* Only used in debug.
 * This function is a raw memory dump of a portion of an mbuf's memory
 * region.  The usual function, rte_pktmbuf_dump(), only shows data
 * with respect to the data_off field.  This function shows data
 * anywhere in the mbuf's buffer.  This is useful for examining
 * data in the headroom or tailroom portion of an mbuf.
 */
static void
dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
{
	uint16_t i, j;

	ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
		    mbuf, mbuf->pkt_len, mbuf->data_off);
	for (i = lo; i < hi; i += 16) {
		uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);

		ARK_PMD_LOG(DEBUG, "  %6d:  ", i);
		for (j = 0; j < 16; j++)
			ARK_PMD_LOG(DEBUG, " %02x", dp[j]);

		ARK_PMD_LOG(DEBUG, "\n");
	}
}
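
/*
 * Usage sketch (illustrative addition): to hex-dump the Arkville meta
 * data region in the headroom of a received mbuf:
 *
 *	dump_mbuf_data(mbuf, ARK_RX_META_OFFSET, RTE_PKTMBUF_HEADROOM);
 */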