1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2020 Broadcom.
8 #include <rte_atomic.h>
9 #include <rte_bitmap.h>
10 #include <rte_common.h>
12 #include <rte_malloc.h>
13 #include <rte_memzone.h>
14 #include <rte_prefetch.h>
15 #include <rte_string_fns.h>
17 #include "bcmfs_logs.h"
19 #include "bcmfs_hw_defs.h"
/* TX or submission queue name (suffix used when building memzone names) */
static const char *txq_name = "tx";
/* Completion or receive queue name (suffix used when building memzone names) */
static const char *cmplq_name = "cmpl";
/*
 * Check that a ring's physical address meets the h/w alignment rule.
 *
 * @phys_addr: IOVA/physical base address of the ring memory
 * @align:     required alignment in bytes (a power of two)
 *
 * Returns 0 when @phys_addr is a multiple of @align, -EINVAL otherwise.
 */
static int
bcmfs_qp_check_queue_alignment(uint64_t phys_addr,
			       uint32_t align)
{
	uint64_t mask = (uint64_t)align - 1;

	return (phys_addr & mask) ? -EINVAL : 0;
}
37 bcmfs_queue_delete(struct bcmfs_queue *queue,
38 uint16_t queue_pair_id)
40 const struct rte_memzone *mz;
44 BCMFS_LOG(DEBUG, "Invalid queue");
47 BCMFS_LOG(DEBUG, "Free ring %d type %d, memzone: %s",
48 queue_pair_id, queue->q_type, queue->memz_name);
50 mz = rte_memzone_lookup(queue->memz_name);
52 /* Write an unused pattern to the queue memory. */
53 memset(queue->base_addr, 0x9B, queue->queue_size);
54 status = rte_memzone_free(mz);
56 BCMFS_LOG(ERR, "Error %d on freeing queue %s",
57 status, queue->memz_name);
59 BCMFS_LOG(DEBUG, "queue %s doesn't exist",
64 static const struct rte_memzone *
65 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
66 int socket_id, unsigned int align)
68 const struct rte_memzone *mz;
70 mz = rte_memzone_lookup(queue_name);
72 if (((size_t)queue_size <= mz->len) &&
73 (socket_id == SOCKET_ID_ANY ||
74 socket_id == mz->socket_id)) {
75 BCMFS_LOG(DEBUG, "re-use memzone already "
76 "allocated for %s", queue_name);
80 BCMFS_LOG(ERR, "Incompatible memzone already "
81 "allocated %s, size %u, socket %d. "
82 "Requested size %u, socket %u",
83 queue_name, (uint32_t)mz->len,
84 mz->socket_id, queue_size, socket_id);
88 BCMFS_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
89 queue_name, queue_size, socket_id);
90 return rte_memzone_reserve_aligned(queue_name, queue_size,
91 socket_id, RTE_MEMZONE_IOVA_CONTIG, align);
95 bcmfs_queue_create(struct bcmfs_queue *queue,
96 struct bcmfs_qp_config *qp_conf,
97 uint16_t queue_pair_id,
98 enum bcmfs_queue_type qtype)
100 const struct rte_memzone *qp_mz;
103 uint32_t queue_size_bytes;
106 if (qtype == BCMFS_RM_TXQ) {
107 strlcpy(q_name, txq_name, sizeof(q_name));
108 align = 1U << FS_RING_BD_ALIGN_ORDER;
109 queue_size_bytes = qp_conf->nb_descriptors *
110 qp_conf->max_descs_req * FS_RING_DESC_SIZE;
111 queue_size_bytes = RTE_ALIGN_MUL_CEIL(queue_size_bytes,
113 /* make queue size to multiple for 4K pages */
114 } else if (qtype == BCMFS_RM_CPLQ) {
115 strlcpy(q_name, cmplq_name, sizeof(q_name));
116 align = 1U << FS_RING_CMPL_ALIGN_ORDER;
119 * Memory size for cmpl + MSI
120 * For MSI allocate here itself and so we allocate twice
122 queue_size_bytes = 2 * FS_RING_CMPL_SIZE;
124 BCMFS_LOG(ERR, "Invalid queue selection");
128 queue->q_type = qtype;
131 * Allocate a memzone for the queue - create a unique name.
133 snprintf(queue->memz_name, sizeof(queue->memz_name),
134 "%s_%d_%s_%d_%s", "bcmfs", qtype, "qp_mem",
135 queue_pair_id, q_name);
136 qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
139 BCMFS_LOG(ERR, "Failed to allocate ring memzone");
143 if (bcmfs_qp_check_queue_alignment(qp_mz->iova, align)) {
144 BCMFS_LOG(ERR, "Invalid alignment on queue create "
146 queue->base_phys_addr);
148 goto queue_create_err;
151 queue->base_addr = (char *)qp_mz->addr;
152 queue->base_phys_addr = qp_mz->iova;
153 queue->queue_size = queue_size_bytes;
158 rte_memzone_free(qp_mz);
164 bcmfs_qp_release(struct bcmfs_qp **qp_addr)
166 struct bcmfs_qp *qp = *qp_addr;
169 BCMFS_LOG(DEBUG, "qp already freed");
173 /* Don't free memory if there are still responses to be processed */
174 if ((qp->stats.enqueued_count - qp->stats.dequeued_count) == 0) {
175 /* Stop the h/w ring */
177 /* Delete the queue pairs */
178 bcmfs_queue_delete(&qp->tx_q, qp->qpair_id);
179 bcmfs_queue_delete(&qp->cmpl_q, qp->qpair_id);
184 rte_bitmap_reset(qp->ctx_bmp);
185 rte_free(qp->ctx_bmp_mem);
186 rte_free(qp->ctx_pool);
195 bcmfs_qp_setup(struct bcmfs_qp **qp_addr,
196 uint16_t queue_pair_id,
197 struct bcmfs_qp_config *qp_conf)
201 uint32_t nb_descriptors = qp_conf->nb_descriptors;
205 if (nb_descriptors < FS_RM_MIN_REQS) {
206 BCMFS_LOG(ERR, "Can't create qp for %u descriptors",
211 if (nb_descriptors > FS_RM_MAX_REQS)
212 nb_descriptors = FS_RM_MAX_REQS;
214 if (qp_conf->iobase == NULL) {
215 BCMFS_LOG(ERR, "IO config space null");
219 qp = rte_zmalloc_socket("BCM FS PMD qp metadata",
220 sizeof(*qp), RTE_CACHE_LINE_SIZE,
223 BCMFS_LOG(ERR, "Failed to alloc mem for qp struct");
227 qp->qpair_id = queue_pair_id;
228 qp->ioreg = qp_conf->iobase;
229 qp->nb_descriptors = nb_descriptors;
230 qp->ops = qp_conf->ops;
232 qp->stats.enqueued_count = 0;
233 qp->stats.dequeued_count = 0;
235 rc = bcmfs_queue_create(&qp->tx_q, qp_conf, qp->qpair_id,
238 BCMFS_LOG(ERR, "Tx queue create failed queue_pair_id %u",
243 rc = bcmfs_queue_create(&qp->cmpl_q, qp_conf, qp->qpair_id,
246 BCMFS_LOG(ERR, "Cmpl queue create failed queue_pair_id= %u",
251 /* ctx saving bitmap */
252 bmp_size = rte_bitmap_get_memory_footprint(nb_descriptors);
254 /* Allocate memory for bitmap */
255 qp->ctx_bmp_mem = rte_zmalloc("ctx_bmp_mem", bmp_size,
256 RTE_CACHE_LINE_SIZE);
257 if (qp->ctx_bmp_mem == NULL) {
262 /* Initialize pool resource bitmap array */
263 qp->ctx_bmp = rte_bitmap_init(nb_descriptors, qp->ctx_bmp_mem,
265 if (qp->ctx_bmp == NULL) {
270 /* Mark all pools available */
271 for (i = 0; i < nb_descriptors; i++)
272 rte_bitmap_set(qp->ctx_bmp, i);
274 /* Allocate memory for context */
275 qp->ctx_pool = rte_zmalloc("qp_ctx_pool",
276 sizeof(unsigned long) *
278 if (qp->ctx_pool == NULL) {
279 BCMFS_LOG(ERR, "ctx allocation pool fails");
292 rte_bitmap_reset(qp->ctx_bmp);
294 rte_free(qp->ctx_bmp_mem);
296 bcmfs_queue_delete(&qp->cmpl_q, queue_pair_id);
298 bcmfs_queue_delete(&qp->tx_q, queue_pair_id);
306 bcmfs_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
308 struct bcmfs_qp *tmp_qp = (struct bcmfs_qp *)qp;
309 register uint32_t nb_ops_sent = 0;
310 uint16_t nb_ops_possible = nb_ops;
313 if (unlikely(nb_ops == 0))
316 while (nb_ops_sent != nb_ops_possible) {
317 ret = tmp_qp->ops->enq_one_req(qp, *ops);
319 tmp_qp->stats.enqueue_err_count++;
320 /* This message cannot be enqueued */
321 if (nb_ops_sent == 0)
331 tmp_qp->stats.enqueued_count += nb_ops_sent;
332 tmp_qp->ops->ring_db(tmp_qp);
338 bcmfs_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
340 struct bcmfs_qp *tmp_qp = (struct bcmfs_qp *)qp;
341 uint32_t deq = tmp_qp->ops->dequeue(tmp_qp, ops, nb_ops);
343 tmp_qp->stats.dequeued_count += deq;
348 void bcmfs_qp_stats_get(struct bcmfs_qp **qp, int num_qp,
349 struct bcmfs_qp_stats *stats)
354 BCMFS_LOG(ERR, "invalid param: stats %p",
359 for (i = 0; i < num_qp; i++) {
361 BCMFS_LOG(DEBUG, "Uninitialised qp %d", i);
365 stats->enqueued_count += qp[i]->stats.enqueued_count;
366 stats->dequeued_count += qp[i]->stats.dequeued_count;
367 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
368 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
372 void bcmfs_qp_stats_reset(struct bcmfs_qp **qp, int num_qp)
376 for (i = 0; i < num_qp; i++) {
378 BCMFS_LOG(DEBUG, "Uninitialised qp %d", i);
381 memset(&qp[i]->stats, 0, sizeof(qp[i]->stats));