/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <string.h>
#include <unistd.h>

#include <rte_bus.h>
#include <rte_bus_pci.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mempool.h>
#include <rte_pci.h>

#include <rte_common.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "otx2_common.h"
#include "otx2_ep_enqdeq.h"
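
/*
 * Queue-pair plumbing for the OCTEON TX2 end point (SDP) rawdev PMD:
 * instruction queue (IQ) and output queue (DROQ/OQ) creation and teardown,
 * plus the host-side instruction enqueue path.
 */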

static void
sdp_dmazone_free(const struct rte_memzone *mz)
{
	const struct rte_memzone *mz_tmp;
	int ret = 0;

	if (mz == NULL) {
		otx2_err("Memzone: NULL");
		return;
	}

	mz_tmp = rte_memzone_lookup(mz->name);
	if (mz_tmp == NULL) {
		otx2_err("Memzone %s Not Found", mz->name);
		return;
	}

	ret = rte_memzone_free(mz);
	if (ret)
		otx2_err("Memzone free failed : ret = %d", ret);
}

/* Free IQ resources */
int
sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
	struct sdp_instr_queue *iq;

	iq = sdpvf->instr_queue[iq_no];
	if (iq == NULL) {
		otx2_err("Invalid IQ[%d]", iq_no);
		return -ENOMEM;
	}

	rte_free(iq->req_list);
	iq->req_list = NULL;

	if (iq->iq_mz) {
		sdp_dmazone_free(iq->iq_mz);
		iq->iq_mz = NULL;
	}

	rte_free(sdpvf->instr_queue[iq_no]);
	sdpvf->instr_queue[iq_no] = NULL;

	sdpvf->num_iqs--;

	otx2_info("IQ[%d] is deleted", iq_no);

	return 0;
}

/* IQ initialization */
static int
sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no)
{
	const struct sdp_config *conf;
	struct sdp_instr_queue *iq;
	uint32_t q_size;

	conf = sdpvf->conf;
	iq = sdpvf->instr_queue[iq_no];
	q_size = conf->iq.instr_type * conf->num_iqdef_descs;
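
	/*
	 * Each IQ slot holds one command of conf->iq.instr_type bytes
	 * (32 or 64), so the ring needs instr_type * num_iqdef_descs bytes.
	 */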

	/* IQ memory creation for Instruction submission to OCTEON TX2 */
	iq->iq_mz = rte_memzone_reserve_aligned("iqmz",
					q_size,
					rte_socket_id(),
					RTE_MEMZONE_IOVA_CONTIG,
					RTE_CACHE_LINE_SIZE);
	if (iq->iq_mz == NULL) {
		otx2_err("IQ[%d] memzone alloc failed", iq_no);
		goto iq_init_fail;
	}

	iq->base_addr_dma = iq->iq_mz->iova;
	iq->base_addr = (uint8_t *)iq->iq_mz->addr;
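
	/*
	 * nb_desc must be a power of two (checked below) so that ring
	 * indexes can wrap around cheaply in sdp_incr_index().
	 */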
	if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) {
		otx2_err("IQ[%d] descs not in power of 2", iq_no);
		goto iq_init_fail;
	}

	iq->nb_desc = conf->num_iqdef_descs;

	/* Create an IQ request list to hold requests that have been
	 * posted to OCTEON TX2. This list will be used for freeing the IQ
	 * data buffer(s) later once OCTEON TX2 has fetched the requests.
	 */
	iq->req_list = rte_zmalloc_socket("request_list",
					  (iq->nb_desc * SDP_IQREQ_LIST_SIZE),
					  RTE_CACHE_LINE_SIZE,
					  rte_socket_id());
	if (iq->req_list == NULL) {
		otx2_err("IQ[%d] req_list alloc failed", iq_no);
		goto iq_init_fail;
	}

	otx2_info("IQ[%d]: base: %p basedma: %lx count: %d",
		  iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
		  iq->nb_desc);

	iq->sdp_dev = sdpvf;
	iq->q_no = iq_no;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->otx_read_index = 0;
	iq->flush_index = 0;

	/* Initialize the spinlocks for this instruction queue */
	rte_spinlock_init(&iq->lock);
	rte_spinlock_init(&iq->post_lock);
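
	/*
	 * post_lock serialises producers in sdp_send_data(); lock guards
	 * the reclamation walk done by sdp_flush_iq().
	 */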

	rte_atomic64_clear(&iq->iq_flush_running);

	sdpvf->io_qmask.iq |= (1ull << iq_no);

	/* Set 32B/64B mode for each input queue */
	if (conf->iq.instr_type == 64)
		sdpvf->io_qmask.iq64B |= (1ull << iq_no);

	iq->iqcmd_64B = (conf->iq.instr_type == 64);

	/* Set up IQ registers */
	sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no);

	return 0;

iq_init_fail:
	return -ENOMEM;
}

int
sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
	struct sdp_instr_queue *iq;

	iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq),
						   RTE_CACHE_LINE_SIZE);
	if (iq == NULL)
		return -ENOMEM;

	sdpvf->instr_queue[iq_no] = iq;

	if (sdp_init_instr_queue(sdpvf, iq_no)) {
		otx2_err("IQ init failed");
		goto delete_IQ;
	}
	otx2_info("IQ[%d] is created.", iq_no);

	sdpvf->num_iqs++;

	return 0;

delete_IQ:
	sdp_delete_iqs(sdpvf, iq_no);
	return -ENOMEM;
}
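
/*
 * A queue pair is brought up from the PMD configure path (sketch; the
 * caller is not part of this file):
 *
 *	if (sdp_setup_iqs(sdpvf, q_no) || sdp_setup_oqs(sdpvf, q_no))
 *		goto fail;
 *	...
 *	sdp_delete_oqs(sdpvf, q_no);
 *	sdp_delete_iqs(sdpvf, q_no);
 */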

static void
sdp_droq_reset_indices(struct sdp_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	rte_atomic64_set(&droq->pkts_pending, 0);
}

static void
sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf,
			      struct sdp_droq *droq)
{
	uint32_t idx;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		if (droq->recv_buf_list[idx].buffer) {
			rte_mempool_put(sdpvf->enqdeq_mpool,
					droq->recv_buf_list[idx].buffer);
			droq->recv_buf_list[idx].buffer = NULL;
		}
	}

	sdp_droq_reset_indices(droq);
}

/* Free OQs resources */
int
sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
	struct sdp_droq *droq;

	droq = sdpvf->droq[oq_no];
	if (droq == NULL) {
		otx2_err("Invalid droq[%d]", oq_no);
		return -ENOMEM;
	}

	sdp_droq_destroy_ring_buffers(sdpvf, droq);
	rte_free(droq->recv_buf_list);
	droq->recv_buf_list = NULL;

	if (droq->info_mz) {
		sdp_dmazone_free(droq->info_mz);
		droq->info_mz = NULL;
	}

	if (droq->desc_ring_mz) {
		sdp_dmazone_free(droq->desc_ring_mz);
		droq->desc_ring_mz = NULL;
	}

	memset(droq, 0, SDP_DROQ_SIZE);

	rte_free(sdpvf->droq[oq_no]);
	sdpvf->droq[oq_no] = NULL;

	sdpvf->num_oqs--;

	otx2_info("OQ[%d] is deleted", oq_no);

	return 0;
}

static int
sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf,
			    struct sdp_droq *droq)
{
	struct sdp_droq_desc *desc_ring = droq->desc_ring;
	uint32_t idx;
	void *buf;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf)) {
			otx2_err("OQ buffer alloc failed");
			/* Return the buffers acquired so far to the pool */
			sdp_droq_destroy_ring_buffers(sdpvf, droq);
			return -ENOMEM;
		}

		droq->recv_buf_list[idx].buffer = buf;
		droq->info_list[idx].length = 0;

		/* Map ring buffers into memory */
		desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma +
				(idx * SDP_DROQ_INFO_SIZE));
		desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf);
	}

	sdp_droq_reset_indices(droq);

	return 0;
}
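
/*
 * Each DROQ descriptor carries two DMA addresses: info_ptr points into the
 * contiguous info_list (per-packet metadata such as length, written back by
 * HW) and buffer_ptr points at the receive data buffer. The helper below
 * reserves that info_list memzone.
 */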

static void *
sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused,
		      struct sdp_droq *droq)
{
	droq->info_mz = rte_memzone_reserve_aligned("OQ_info_list",
					(droq->nb_desc * SDP_DROQ_INFO_SIZE),
					rte_socket_id(),
					RTE_MEMZONE_IOVA_CONTIG,
					RTE_CACHE_LINE_SIZE);
	if (droq->info_mz == NULL)
		return NULL;

	droq->info_list_dma = droq->info_mz->iova;
	droq->info_alloc_size = droq->info_mz->len;
	droq->info_base_addr = (size_t)droq->info_mz->addr;

	return droq->info_mz->addr;
}

/* OQ initialization */
static int
sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no)
{
	const struct sdp_config *conf = sdpvf->conf;
	uint32_t c_refill_threshold;
	uint32_t desc_ring_size;
	struct sdp_droq *droq;

	otx2_info("OQ[%d] Init start", q_no);

	droq = sdpvf->droq[q_no];
	droq->sdp_dev = sdpvf;
	droq->q_no = q_no;

	c_refill_threshold = conf->oq.refill_threshold;
	droq->nb_desc = conf->num_oqdef_descs;
	droq->buffer_size = conf->oqdef_buf_size;

	/* OQ desc_ring set up */
	desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE;
	droq->desc_ring_mz = rte_memzone_reserve_aligned("sdp_oqmz",
						desc_ring_size,
						rte_socket_id(),
						RTE_MEMZONE_IOVA_CONTIG,
						RTE_CACHE_LINE_SIZE);
	if (droq->desc_ring_mz == NULL) {
		otx2_err("OQ:%d desc_ring allocation failed", q_no);
		goto init_droq_fail;
	}

	droq->desc_ring_dma = droq->desc_ring_mz->iova;
	droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr;

	otx2_sdp_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx",
		     q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
	otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc);

	/* OQ info_list set up */
	droq->info_list = sdp_alloc_info_buffer(sdpvf, droq);
	if (droq->info_list == NULL) {
		otx2_err("memory allocation failed for OQ[%d] info_list", q_no);
		goto init_droq_fail;
	}

	/* OQ buf_list set up */
	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
				(droq->nb_desc * SDP_DROQ_RECVBUF_SIZE),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (droq->recv_buf_list == NULL) {
		otx2_err("OQ recv_buf_list alloc failed");
		goto init_droq_fail;
	}

	if (sdp_droq_setup_ring_buffers(sdpvf, droq))
		goto init_droq_fail;

	droq->refill_threshold = c_refill_threshold;
	rte_spinlock_init(&droq->lock);

	/* Set up OQ registers */
	sdpvf->fn_list.setup_oq_regs(sdpvf, q_no);

	sdpvf->io_qmask.oq |= (1ull << q_no);

	return 0;

init_droq_fail:
	return -ENOMEM;
}

/* OQ configuration and setup */
int
sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
	struct sdp_droq *droq;

	/* Allocate new droq. */
	droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ",
				sizeof(*droq), RTE_CACHE_LINE_SIZE);
	if (droq == NULL) {
		otx2_err("Droq[%d] creation failed", oq_no);
		return -ENOMEM;
	}
	sdpvf->droq[oq_no] = droq;

	if (sdp_init_droq(sdpvf, oq_no)) {
		otx2_err("Droq[%d] initialization failed", oq_no);
		goto delete_OQ;
	}
	otx2_info("OQ[%d] is created.", oq_no);

	sdpvf->num_oqs++;

	return 0;

delete_OQ:
	sdp_delete_oqs(sdpvf, oq_no);
	return -ENOMEM;
}

static void
sdp_iqreq_delete(struct sdp_device *sdpvf,
		 struct sdp_instr_queue *iq, uint32_t idx)
{
	uint32_t reqtype;
	void *buf;

	buf = iq->req_list[idx].buf;
	reqtype = iq->req_list[idx].reqtype;

	switch (reqtype) {
	case SDP_REQTYPE_NORESP:
		rte_mempool_put(sdpvf->enqdeq_mpool, buf);
		otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx);
		break;

	case SDP_REQTYPE_NORESP_GATHER:
	case SDP_REQTYPE_NONE:
	default:
		otx2_info("This iqreq mode is not supported:%d", reqtype);
		break;
	}

	/* Reset the request list at this index */
	iq->req_list[idx].buf = NULL;
	iq->req_list[idx].reqtype = 0;
}

static void
sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf,
	      uint32_t reqtype)
{
	iq->req_list[iq->host_write_index].buf = buf;
	iq->req_list[iq->host_write_index].reqtype = reqtype;

	otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index);
}
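
/*
 * req_list bookkeeping: sdp_iqreq_add() records each posted buffer and its
 * request type at host_write_index; sdp_flush_iq() later walks flush_index
 * up to the hardware read index and lets sdp_iqreq_delete() return the
 * fetched buffers to the mempool.
 */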

static void
sdp_flush_iq(struct sdp_device *sdpvf,
	     struct sdp_instr_queue *iq,
	     uint32_t pending_thresh __rte_unused)
{
	uint32_t instr_processed = 0;

	rte_spinlock_lock(&iq->lock);

	iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq);
	while (iq->flush_index != iq->otx_read_index) {
		/* Free the IQ data buffer to the pool */
		sdp_iqreq_delete(sdpvf, iq, iq->flush_index);
		iq->flush_index =
			sdp_incr_index(iq->flush_index, 1, iq->nb_desc);

		instr_processed++;
	}

	iq->stats.instr_processed = instr_processed;
	rte_atomic64_sub(&iq->instr_pending, instr_processed);

	rte_spinlock_unlock(&iq->lock);
}

static void
sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,
		  struct sdp_instr_queue *iq)
{
	otx2_write64(iq->fill_cnt, iq->doorbell_reg);

	/* Make sure doorbell writes are observed by HW */
	rte_io_wmb();
	iq->fill_cnt = 0;
}
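
/*
 * fill_cnt counts the commands copied into the ring since the last doorbell
 * write; ringing the doorbell publishes that many new slots to HW, after
 * which the counter restarts from zero.
 */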

static inline int
post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd)
{
	uint8_t *iqptr, cmdsize;

	/* This ensures that the read index does not wrap around to
	 * the same position if queue gets full before OCTEON TX2 could
	 * fetch any instr.
	 */
	if (rte_atomic64_read(&iq->instr_pending) >=
	    (int32_t)(iq->nb_desc - 1)) {
		otx2_err("IQ is full, pending:%ld",
			 (long)rte_atomic64_read(&iq->instr_pending));

		return SDP_IQ_SEND_FAILED;
	}

	/* Copy cmd into iq */
	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	rte_memcpy(iqptr, iqcmd, cmdsize);

	otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index);

	/* Increment the host write index */
	iq->host_write_index =
		sdp_incr_index(iq->host_write_index, 1, iq->nb_desc);

	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data
	 * is in memory before indicating that the instruction is
	 * pending.
	 */
	rte_smp_wmb();
	rte_atomic64_inc(&iq->instr_pending);

	/* SDP_IQ_SEND_SUCCESS */
	return 0;
}

static int
sdp_send_data(struct sdp_device *sdpvf,
	      struct sdp_instr_queue *iq, void *cmd)
{
	uint32_t ret;

	/* Lock this IQ command queue before posting instruction */
	rte_spinlock_lock(&iq->post_lock);

	/* Submit IQ command */
	ret = post_iqcmd(iq, cmd);

	if (ret == SDP_IQ_SEND_SUCCESS) {
		sdp_ring_doorbell(sdpvf, iq);

		iq->stats.instr_posted++;
		otx2_sdp_dbg("Instr submit success, posted: %ld",
			     (long)iq->stats.instr_posted);
	} else {
		iq->stats.instr_dropped++;
		otx2_err("Instr submit failed, dropped: %ld",
			 (long)iq->stats.instr_dropped);
	}

	rte_spinlock_unlock(&iq->post_lock);

	return ret;
}
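
/*
 * Layout of the 64B command assembled below (see the debug prints):
 * Word0 = dptr (input data DMA address), Word1 = IH (instruction header),
 * Word2 = rptr (response pointer), Word3 = IRH, Words 4-7 = exhdr[]
 * front data.
 */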

/* Enqueue requests/packets to SDP IQ queue.
 * Returns the number of requests enqueued successfully.
 */
int
sdp_rawdev_enqueue(struct rte_rawdev *rawdev,
		   struct rte_rawdev_buf **buffers __rte_unused,
		   unsigned int count, rte_rawdev_obj_t context)
{
	struct sdp_instr_64B *iqcmd;
	struct sdp_instr_queue *iq;
	struct sdp_soft_instr *si;
	struct sdp_device *sdpvf;

	struct sdp_instr_ih ihx;

	sdpvf = (struct sdp_device *)rawdev->dev_private;
	si = (struct sdp_soft_instr *)context;

	iq = sdpvf->instr_queue[si->q_no];

	if (count != 1) {
		otx2_err("This mode not supported: req[%u]", count);
		goto enq_fail;
	}

	memset(&ihx, 0, sizeof(struct sdp_instr_ih));

	iqcmd = &si->command;
	memset(iqcmd, 0, sizeof(struct sdp_instr_64B));

	iqcmd->dptr = (uint64_t)si->dptr;

	/* Populate SDP IH */
	ihx.pkind = sdpvf->pkind;
	ihx.fsz = si->ih.fsz + 8; /* 8B for NIX IH */
	ihx.gather = si->ih.gather;

	switch (ihx.gather) {
	case 0: /* Direct data instr */
		ihx.tlen = si->ih.tlen + ihx.fsz;
		break;

	default: /* Gather */
		switch (si->ih.gsz) {
		case 0: /* Direct gather instr */
			otx2_err("Direct Gather instr : not supported");
			goto enq_fail;

		default: /* Indirect gather instr */
			otx2_err("Indirect Gather instr : not supported");
			goto enq_fail;
		}
	}

	rte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t));
	iqcmd->rptr = (uint64_t)si->rptr;
	rte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t));

	/* Swap FSZ(front data) here, to avoid swapping on OCTEON TX2 side */
	sdp_swap_8B_data(&iqcmd->rptr, 1);
	sdp_swap_8B_data(&iqcmd->irh, 1);

	otx2_sdp_dbg("After swapping");
	otx2_sdp_dbg("Word0 [dptr]: 0x%016lx", (unsigned long)iqcmd->dptr);
	otx2_sdp_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd->ih);
	otx2_sdp_dbg("Word2 [rptr]: 0x%016lx", (unsigned long)iqcmd->rptr);
	otx2_sdp_dbg("Word3 [irh]: 0x%016lx", (unsigned long)iqcmd->irh);
	otx2_sdp_dbg("Word4 [exhdr[0]]: 0x%016lx",
		     (unsigned long)iqcmd->exhdr[0]);

	sdp_iqreq_add(iq, si->dptr, si->reqtype);

	if (sdp_send_data(sdpvf, iq, iqcmd)) {
		otx2_err("Data send failed");
		sdp_iqreq_delete(sdpvf, iq, iq->host_write_index);
		goto enq_fail;
	}

	if (rte_atomic64_read(&iq->instr_pending) >= 1)
		sdp_flush_iq(sdpvf, iq, 1 /*(iq->nb_desc / 2)*/);

	/* Return the number of instructions posted successfully. */
	return count;

enq_fail:
	return SDP_IQ_SEND_FAILED;
}
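
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * one request at a time is pushed through the rawdev API and arrives in
 * sdp_rawdev_enqueue() above with the sdp_soft_instr passed as 'context'.
 *
 *	struct sdp_soft_instr si;
 *	struct rte_rawdev_buf *bufs[1];
 *
 *	memset(&si, 0, sizeof(si));
 *	si.q_no = 0;
 *	si.reqtype = SDP_REQTYPE_NORESP;
 *	si.dptr = data_buf;	// buffer taken from sdpvf->enqdeq_mpool
 *	si.ih.tlen = data_len;
 *	rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &si);
 */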