/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <string.h>
#include <unistd.h>

#include <rte_bus_pci.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mempool.h>
#include <rte_pci.h>

#include <rte_common.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "otx2_common.h"
#include "otx2_ep_enqdeq.h"

static void
sdp_dmazone_free(const struct rte_memzone *mz)
{
	const struct rte_memzone *mz_tmp;
	int ret = 0;

	if (mz == NULL) {
		otx2_err("Memzone: NULL");
		return;
	}

	mz_tmp = rte_memzone_lookup(mz->name);
	if (mz_tmp == NULL) {
		otx2_err("Memzone %s not found", mz->name);
		return;
	}

	ret = rte_memzone_free(mz);
	if (ret)
		otx2_err("Memzone free failed: ret = %d", ret);
}

/* Free IQ resources */
int
sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
	struct sdp_instr_queue *iq;

	iq = sdpvf->instr_queue[iq_no];
	if (iq == NULL) {
		otx2_err("Invalid IQ[%d]", iq_no);
		return -ENOMEM;
	}

	rte_free(iq->req_list);
	iq->req_list = NULL;

	if (iq->iq_mz) {
		sdp_dmazone_free(iq->iq_mz);
		iq->iq_mz = NULL;
	}

	rte_free(sdpvf->instr_queue[iq_no]);
	sdpvf->instr_queue[iq_no] = NULL;

	sdpvf->num_iqs--;

	otx2_info("IQ[%d] is deleted", iq_no);

	return 0;
}

/* IQ initialization */
static int
sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no)
{
	const struct sdp_config *conf;
	struct sdp_instr_queue *iq;
	uint32_t q_size;

	conf = sdpvf->conf;
	iq = sdpvf->instr_queue[iq_no];
	q_size = conf->iq.instr_type * conf->num_iqdef_descs;

	/* IQ memory creation for Instruction submission to OCTEON TX2 */
	iq->iq_mz = rte_memzone_reserve_aligned("iqmz",
					q_size,
					rte_socket_id(),
					RTE_MEMZONE_IOVA_CONTIG,
					RTE_CACHE_LINE_SIZE);
	if (iq->iq_mz == NULL) {
		otx2_err("IQ[%d] memzone alloc failed", iq_no);
		goto iq_init_fail;
	}

	iq->base_addr_dma = iq->iq_mz->iova;
	iq->base_addr = (uint8_t *)iq->iq_mz->addr;

	if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) {
		otx2_err("IQ[%d] descs not in power of 2", iq_no);
		goto iq_init_fail;
	}

	iq->nb_desc = conf->num_iqdef_descs;
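
	/* Illustration of the power-of-two check above: 1024 is
	 * 0b100_0000_0000, so 1024 & 1023 == 0 and the ring size is
	 * accepted; 1000 & 999 != 0, so 1000 descriptors would be
	 * rejected. The test works because a power of two has a single
	 * set bit, which subtracting one always clears.
	 */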

	/* Create an IQ request list to hold requests that have been
	 * posted to OCTEON TX2. This list will be used for freeing the IQ
	 * data buffer(s) later once OCTEON TX2 has fetched the requests.
	 */
	iq->req_list = rte_zmalloc_socket("request_list",
			(iq->nb_desc * SDP_IQREQ_LIST_SIZE),
			RTE_CACHE_LINE_SIZE,
			rte_socket_id());
	if (iq->req_list == NULL) {
		otx2_err("IQ[%d] req_list alloc failed", iq_no);
		goto iq_init_fail;
	}

	otx2_info("IQ[%d]: base: %p basedma: %lx count: %d",
		  iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
		  iq->nb_desc);

	iq->sdp_dev = sdpvf;
	iq->q_no = iq_no;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->otx_read_index = 0;
	iq->flush_index = 0;

	/* Initialize the spinlocks for this instruction queue */
	rte_spinlock_init(&iq->lock);
	rte_spinlock_init(&iq->post_lock);

	rte_atomic64_clear(&iq->iq_flush_running);

	sdpvf->io_qmask.iq |= (1ull << iq_no);

	/* Set 32B/64B mode for each input queue */
	if (conf->iq.instr_type == 64)
		sdpvf->io_qmask.iq64B |= (1ull << iq_no);

	iq->iqcmd_64B = (conf->iq.instr_type == 64);

	/* Set up IQ registers */
	sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no);

	return 0;

iq_init_fail:
	return -ENOMEM;
}

int
sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
	struct sdp_instr_queue *iq;

	iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq),
						   RTE_CACHE_LINE_SIZE);
	if (iq == NULL)
		return -ENOMEM;

	sdpvf->instr_queue[iq_no] = iq;

	if (sdp_init_instr_queue(sdpvf, iq_no)) {
		otx2_err("IQ init failed");
		goto delete_IQ;
	}
	otx2_info("IQ[%d] is created.", sdpvf->num_iqs);

	sdpvf->num_iqs++;

	return 0;

delete_IQ:
	sdp_delete_iqs(sdpvf, iq_no);
	return -ENOMEM;
}

static void
sdp_droq_reset_indices(struct sdp_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	rte_atomic64_set(&droq->pkts_pending, 0);
}

static void
sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf,
			      struct sdp_droq *droq)
{
	uint32_t idx;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		if (droq->recv_buf_list[idx].buffer) {
			rte_mempool_put(sdpvf->enqdeq_mpool,
					droq->recv_buf_list[idx].buffer);
			droq->recv_buf_list[idx].buffer = NULL;
		}
	}

	sdp_droq_reset_indices(droq);
}

/* Free OQ resources */
int
sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
	struct sdp_droq *droq;

	droq = sdpvf->droq[oq_no];
	if (droq == NULL) {
		otx2_err("Invalid droq[%d]", oq_no);
		return -ENOMEM;
	}

	sdp_droq_destroy_ring_buffers(sdpvf, droq);
	rte_free(droq->recv_buf_list);
	droq->recv_buf_list = NULL;

	if (droq->info_mz) {
		sdp_dmazone_free(droq->info_mz);
		droq->info_mz = NULL;
	}

	if (droq->desc_ring_mz) {
		sdp_dmazone_free(droq->desc_ring_mz);
		droq->desc_ring_mz = NULL;
	}

	memset(droq, 0, SDP_DROQ_SIZE);

	rte_free(sdpvf->droq[oq_no]);
	sdpvf->droq[oq_no] = NULL;

	sdpvf->num_oqs--;

	otx2_info("OQ[%d] is deleted", oq_no);

	return 0;
}

static int
sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf,
			    struct sdp_droq *droq)
{
	struct sdp_droq_desc *desc_ring = droq->desc_ring;
	uint32_t idx;
	void *buf;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) ||
		    (buf == NULL)) {
			otx2_err("OQ buffer alloc failed");
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[idx].buffer = buf;
		droq->info_list[idx].length = 0;

		/* Map ring buffers into memory */
		desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma +
				(idx * SDP_DROQ_INFO_SIZE));
		desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf);
	}

	sdp_droq_reset_indices(droq);

	return 0;
}

static void *
sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused,
		      struct sdp_droq *droq)
{
	droq->info_mz = rte_memzone_reserve_aligned("OQ_info_list",
				(droq->nb_desc * SDP_DROQ_INFO_SIZE),
				rte_socket_id(),
				RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);
	if (droq->info_mz == NULL)
		return NULL;

	droq->info_list_dma = droq->info_mz->iova;
	droq->info_alloc_size = droq->info_mz->len;
	droq->info_base_addr = (size_t)droq->info_mz->addr;

	return droq->info_mz->addr;
}

/* OQ initialization */
static int
sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no)
{
	const struct sdp_config *conf = sdpvf->conf;
	uint32_t c_refill_threshold;
	uint32_t desc_ring_size;
	struct sdp_droq *droq;

	otx2_info("OQ[%d] Init start", q_no);

	droq = sdpvf->droq[q_no];
	droq->sdp_dev = sdpvf;
	droq->q_no = q_no;

	c_refill_threshold = conf->oq.refill_threshold;
	droq->nb_desc = conf->num_oqdef_descs;
	droq->buffer_size = conf->oqdef_buf_size;

	/* OQ desc_ring set up */
	desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE;
	droq->desc_ring_mz = rte_memzone_reserve_aligned("sdp_oqmz",
				desc_ring_size,
				rte_socket_id(),
				RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);
	if (droq->desc_ring_mz == NULL) {
		otx2_err("OQ:%d desc_ring allocation failed", q_no);
		goto init_droq_fail;
	}

	droq->desc_ring_dma = droq->desc_ring_mz->iova;
	droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr;

	otx2_sdp_dbg("OQ[%d]: desc_ring virt: %p, dma: %lx",
		     q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
	otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc);

	/* OQ info_list set up */
	droq->info_list = sdp_alloc_info_buffer(sdpvf, droq);
	if (droq->info_list == NULL) {
		otx2_err("memory allocation failed for OQ[%d] info_list", q_no);
		goto init_droq_fail;
	}

	/* OQ buf_list set up */
	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
				(droq->nb_desc * SDP_DROQ_RECVBUF_SIZE),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (droq->recv_buf_list == NULL) {
		otx2_err("OQ recv_buf_list alloc failed");
		goto init_droq_fail;
	}

	if (sdp_droq_setup_ring_buffers(sdpvf, droq))
		goto init_droq_fail;

	droq->refill_threshold = c_refill_threshold;
	rte_spinlock_init(&droq->lock);

	/* Set up OQ registers */
	sdpvf->fn_list.setup_oq_regs(sdpvf, q_no);

	sdpvf->io_qmask.oq |= (1ull << q_no);

	return 0;

init_droq_fail:
	return -ENOMEM;
}
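
/* DROQ memory at a glance, as wired up above: each descriptor slot
 * pairs a packet buffer from enqdeq_mpool with the matching info-list
 * entry, where the hardware reports the received length.
 *
 *	desc_ring[i].buffer_ptr --> recv_buf_list[i].buffer  (packet data)
 *	desc_ring[i].info_ptr   --> info_list[i]             (length etc.)
 */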

/* OQ configuration and setup */
int
sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
	struct sdp_droq *droq;

	/* Allocate new droq. */
	droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ",
				sizeof(*droq), RTE_CACHE_LINE_SIZE);
	if (droq == NULL) {
		otx2_err("Droq[%d] creation failed", oq_no);
		return -ENOMEM;
	}
	sdpvf->droq[oq_no] = droq;

	if (sdp_init_droq(sdpvf, oq_no)) {
		otx2_err("Droq[%d] initialization failed", oq_no);
		goto delete_OQ;
	}
	otx2_info("OQ[%d] is created.", oq_no);

	sdpvf->num_oqs++;

	return 0;

delete_OQ:
	sdp_delete_oqs(sdpvf, oq_no);
	return -ENOMEM;
}

static inline void
sdp_iqreq_delete(struct sdp_device *sdpvf,
		 struct sdp_instr_queue *iq, uint32_t idx)
{
	uint32_t reqtype;
	void *buf;

	buf = iq->req_list[idx].buf;
	reqtype = iq->req_list[idx].reqtype;

	switch (reqtype) {
	case SDP_REQTYPE_NORESP:
		rte_mempool_put(sdpvf->enqdeq_mpool, buf);
		otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx);
		break;

	case SDP_REQTYPE_NORESP_GATHER:
	case SDP_REQTYPE_NONE:
	default:
		otx2_info("This iqreq mode is not supported:%d", reqtype);
	}

	/* Reset the request list at this index */
	iq->req_list[idx].buf = NULL;
	iq->req_list[idx].reqtype = 0;
}

static inline void
sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf,
	      uint32_t reqtype)
{
	iq->req_list[iq->host_write_index].buf = buf;
	iq->req_list[iq->host_write_index].reqtype = reqtype;

	otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index);
}

static void
sdp_flush_iq(struct sdp_device *sdpvf,
	     struct sdp_instr_queue *iq,
	     uint32_t pending_thresh __rte_unused)
{
	uint32_t instr_processed = 0;

	rte_spinlock_lock(&iq->lock);

	iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq);
	while (iq->flush_index != iq->otx_read_index) {
		/* Free the IQ data buffer to the pool */
		sdp_iqreq_delete(sdpvf, iq, iq->flush_index);
		iq->flush_index =
			sdp_incr_index(iq->flush_index, 1, iq->nb_desc);

		instr_processed++;
	}

	iq->stats.instr_processed = instr_processed;
	rte_atomic64_sub(&iq->instr_pending, instr_processed);

	rte_spinlock_unlock(&iq->lock);
}
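
/* sdp_incr_index(), declared in otx2_ep_enqdeq.h, advances a ring
 * index with wrap-around. A minimal sketch of the expected behaviour,
 * assuming 'max' is the ring size:
 *
 *	static inline uint32_t
 *	sdp_incr_index(uint32_t index, uint32_t count, uint32_t max)
 *	{
 *		return ((index + count) >= max) ?
 *			(index + count - max) : (index + count);
 *	}
 *
 * e.g. with a 1024-entry ring, sdp_incr_index(1023, 1, 1024) == 0.
 */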

static inline void
sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,
		  struct sdp_instr_queue *iq)
{
	otx2_write64(iq->fill_cnt, iq->doorbell_reg);

	/* Make sure doorbell write is observed by HW */
	rte_cio_wmb();

	iq->fill_cnt = 0;
}

static inline int
post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd)
{
	uint8_t *iqptr, cmdsize;

	/* This ensures that the read index does not wrap around to
	 * the same position if the queue gets full before OCTEON TX2
	 * could fetch any instr.
	 */
	if (rte_atomic64_read(&iq->instr_pending) >=
	    (int32_t)(iq->nb_desc - 1)) {
		otx2_err("IQ is full, pending:%ld",
			 (long)rte_atomic64_read(&iq->instr_pending));
		return SDP_IQ_SEND_FAILED;
	}

	/* Copy cmd into iq */
	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	rte_memcpy(iqptr, iqcmd, cmdsize);

	otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index);

	/* Increment the host write index */
	iq->host_write_index =
		sdp_incr_index(iq->host_write_index, 1, iq->nb_desc);

	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data
	 * is in memory before indicating that the instruction is
	 * pending.
	 */
	rte_smp_wmb();
	rte_atomic64_inc(&iq->instr_pending);

	return SDP_IQ_SEND_SUCCESS;
}
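
/* Why post_iqcmd() caps pending instructions at nb_desc - 1: if all
 * nb_desc slots could be outstanding at once, host_write_index would
 * wrap onto flush_index and a completely full ring would look exactly
 * like an empty one. With a 1024-entry ring, for example, at most
 * 1023 instructions may be pending at any time.
 */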

static int
sdp_send_data(struct sdp_device *sdpvf,
	      struct sdp_instr_queue *iq, void *cmd)
{
	uint32_t ret;

	/* Lock this IQ command queue before posting instruction */
	rte_spinlock_lock(&iq->post_lock);

	/* Submit IQ command */
	ret = post_iqcmd(iq, cmd);

	if (ret == SDP_IQ_SEND_SUCCESS) {
		sdp_ring_doorbell(sdpvf, iq);

		iq->stats.instr_posted++;
		otx2_sdp_dbg("Instr submit success, posted: %ld",
			     (long)iq->stats.instr_posted);
	} else {
		iq->stats.instr_dropped++;
		otx2_err("Instr submit failed, dropped: %ld",
			 (long)iq->stats.instr_dropped);
	}

	rte_spinlock_unlock(&iq->post_lock);

	return ret;
}

/* Enqueue requests/packets to SDP IQ queue.
 * Returns the number of requests enqueued successfully.
 */
int
sdp_rawdev_enqueue(struct rte_rawdev *rawdev,
		   struct rte_rawdev_buf **buffers __rte_unused,
		   unsigned int count, rte_rawdev_obj_t context)
{
	struct sdp_instr_64B *iqcmd;
	struct sdp_instr_queue *iq;
	struct sdp_soft_instr *si;
	struct sdp_device *sdpvf;
	struct sdp_instr_ih ihx;

	sdpvf = (struct sdp_device *)rawdev->dev_private;
	si = (struct sdp_soft_instr *)context;

	iq = sdpvf->instr_queue[si->q_no];

	if (count != 1) {
		otx2_err("This mode not supported: req[%d]", count);
		goto enq_fail;
	}

	memset(&ihx, 0, sizeof(struct sdp_instr_ih));

	iqcmd = &si->command;
	memset(iqcmd, 0, sizeof(struct sdp_instr_64B));

	iqcmd->dptr = (uint64_t)si->dptr;

	/* Populate SDP IH */
	ihx.pkind = sdpvf->pkind;
	ihx.fsz = si->ih.fsz + 8; /* 8B for NIX IH */
	ihx.gather = si->ih.gather;

	switch (ihx.gather) {
	case 0: /* Direct data instr */
		ihx.tlen = si->ih.tlen + ihx.fsz;
		break;

	default: /* Gather */
		switch (si->ih.gsz) {
		case 0: /* Direct gather instr */
			otx2_err("Direct Gather instr : not supported");
			goto enq_fail;

		default: /* Indirect gather instr */
			otx2_err("Indirect Gather instr : not supported");
			goto enq_fail;
		}
	}

	rte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t));
	iqcmd->rptr = (uint64_t)si->rptr;
	rte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t));

	/* Swap FSZ (front data) here, to avoid swapping on the
	 * OCTEON TX2 side.
	 */
	sdp_swap_8B_data(&iqcmd->rptr, 1);
	sdp_swap_8B_data(&iqcmd->irh, 1);

	otx2_sdp_dbg("After swapping");
	otx2_sdp_dbg("Word0 [dptr]: 0x%016lx", (unsigned long)iqcmd->dptr);
	otx2_sdp_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd->ih);
	otx2_sdp_dbg("Word2 [rptr]: 0x%016lx", (unsigned long)iqcmd->rptr);
	otx2_sdp_dbg("Word3 [irh]: 0x%016lx", (unsigned long)iqcmd->irh);
	otx2_sdp_dbg("Word4 [exhdr[0]]: 0x%016lx",
		     (unsigned long)iqcmd->exhdr[0]);

	sdp_iqreq_add(iq, si->dptr, si->reqtype);

	if (sdp_send_data(sdpvf, iq, iqcmd)) {
		otx2_err("Data send failed");
		sdp_iqreq_delete(sdpvf, iq, iq->host_write_index);
		goto enq_fail;
	}

	if (rte_atomic64_read(&iq->instr_pending) >= 1)
		sdp_flush_iq(sdpvf, iq, 1 /*(iq->nb_desc / 2)*/);

	/* Return the number of instructions posted successfully. */
	return count;

enq_fail:
	return SDP_IQ_SEND_FAILED;
}
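
/* Usage sketch (illustrative, not part of this driver): an application
 * submits one request at a time through the rawdev API, passing an
 * sdp_soft_instr as the opaque context. Field values below are
 * hypothetical.
 *
 *	struct sdp_soft_instr si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.q_no = 0;
 *	si.dptr = data_buf;              // buffer from enqdeq_mpool
 *	si.reqtype = SDP_REQTYPE_NORESP;
 *	si.ih.tlen = data_len;
 *
 *	if (rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &si) != 1)
 *		;  // handle enqueue failure
 */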

static uint32_t
sdp_droq_refill(struct sdp_device *sdpvf, struct sdp_droq *droq)
{
	struct sdp_droq_desc *desc_ring;
	uint32_t desc_refilled = 0;
	void *buf = NULL;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (droq->recv_buf_list[droq->refill_idx].buffer != NULL)
			break;

		if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) ||
		    (buf == NULL)) {
			/* If a buffer could not be allocated, there is
			 * no point in continuing.
			 */
			droq->stats.rx_alloc_failure++;
			break;
		}

		droq->recv_buf_list[droq->refill_idx].buffer = buf;
		desc_ring[droq->refill_idx].buffer_ptr = rte_mem_virt2iova(buf);

		/* Reset any previous values in the length field. */
		droq->info_list[droq->refill_idx].length = 0;

		droq->refill_idx = sdp_incr_index(droq->refill_idx, 1,
						  droq->nb_desc);

		desc_refilled++;
		droq->refill_count--;
	}

	return desc_refilled;
}
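
/* The refill count returned above is written to the OQ credit register
 * by the caller (see sdp_rawdev_dequeue()), telling the hardware how
 * many descriptors are available for DMA again. For example, after
 * reading 8 packets and refilling all 8 slots, the host writes 8 to
 * pkts_credit_reg.
 */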

static int
sdp_droq_read_packet(struct sdp_device *sdpvf __rte_unused,
		     struct sdp_droq *droq,
		     struct sdp_droq_pkt *droq_pkt)
{
	struct sdp_droq_info *info;
	uint32_t pkt_len = 0;

	info = &droq->info_list[droq->read_idx];
	sdp_swap_8B_data((uint64_t *)&info->length, 1);
	if (!info->length) {
		otx2_err("OQ info_list->length[%ld]", (long)info->length);
		goto oq_read_fail;
	}

	/* Deduce the actual data size */
	info->length -= SDP_RH_SIZE;

	otx2_sdp_dbg("OQ: pkt_len[%ld], buffer_size %d",
		     (long)info->length, droq->buffer_size);
	if (info->length > droq->buffer_size) {
		otx2_err("This mode is not supported: pkt_len > buffer_size");
		goto oq_read_fail;
	}

	pkt_len = (uint32_t)info->length;
	droq_pkt->data = droq->recv_buf_list[droq->read_idx].buffer;
	droq_pkt->len = pkt_len;

	droq->recv_buf_list[droq->read_idx].buffer = NULL;
	droq->read_idx = sdp_incr_index(droq->read_idx, 1, /* count */
					droq->nb_desc /* max rd idx */);
	droq->refill_count++;

	info->length = 0;

	return SDP_OQ_RECV_SUCCESS;

oq_read_fail:
	return SDP_OQ_RECV_FAILED;
}

static inline uint32_t
sdp_check_droq_pkts(struct sdp_droq *droq, uint32_t burst_size)
{
	uint32_t min_pkts = 0;
	uint32_t new_pkts;
	uint32_t pkt_count;

	/* Latest available OQ packets */
	pkt_count = rte_read32(droq->pkts_sent_reg);

	/* Newly arrived packets */
	new_pkts = pkt_count - droq->last_pkt_count;
	otx2_sdp_dbg("Recvd [%d] new OQ pkts", new_pkts);

	min_pkts = (new_pkts > burst_size) ? burst_size : new_pkts;

	rte_atomic64_add(&droq->pkts_pending, min_pkts);
	/* Back up the aggregated packet count so far */
	droq->last_pkt_count += min_pkts;

	return min_pkts;
}
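
/* Accounting note: pkts_sent_reg holds the count of packets the
 * hardware has delivered but the host has not yet acknowledged, while
 * last_pkt_count tracks how many of those this driver has already
 * claimed. The subtraction above therefore yields only the packets
 * that are new since the previous poll; both values drop together when
 * the dequeue path acks the register (see sdp_rawdev_dequeue()).
 */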

/* Check for response arrival from OCTEON TX2.
 * Returns the number of requests completed.
 */
int
sdp_rawdev_dequeue(struct rte_rawdev *rawdev,
		   struct rte_rawdev_buf **buffers, unsigned int count,
		   rte_rawdev_obj_t context __rte_unused)
{
	struct sdp_droq_pkt *oq_pkt;
	struct sdp_device *sdpvf;
	struct sdp_droq *droq;
	uint32_t q_no = 0, pkts;
	uint32_t new_pkts;
	uint32_t ret;

	sdpvf = (struct sdp_device *)rawdev->dev_private;

	droq = sdpvf->droq[q_no];
	if (droq == NULL) {
		otx2_err("Invalid droq[%d]", q_no);
		return SDP_OQ_RECV_FAILED;
	}

	/* Grab the lock */
	rte_spinlock_lock(&droq->lock);

	new_pkts = sdp_check_droq_pkts(droq, count);
	if (!new_pkts) {
		otx2_sdp_dbg("Zero new_pkts:%d", new_pkts);
		goto deq_fail; /* No pkts at this moment */
	}

	otx2_sdp_dbg("Received new_pkts = %d", new_pkts);

	for (pkts = 0; pkts < new_pkts; pkts++) {
		/* Push the received pkt to application */
		oq_pkt = (struct sdp_droq_pkt *)buffers[pkts];

		ret = sdp_droq_read_packet(sdpvf, droq, oq_pkt);
		if (ret) {
			otx2_err("DROQ read packet failed.");
			goto deq_fail;
		}

		/* Stats */
		droq->stats.pkts_received++;
		droq->stats.bytes_received += oq_pkt->len;
	}

	/* Ack the h/w with the number of pkts read by the host */
	rte_write32(pkts, droq->pkts_sent_reg);
	rte_cio_wmb();

	droq->last_pkt_count -= pkts;

	otx2_sdp_dbg("DROQ pkts[%d] pushed to application", pkts);

	/* Refill DROQ buffers */
	if (droq->refill_count >= 2 /* droq->refill_threshold */) {
		int desc_refilled = sdp_droq_refill(sdpvf, droq);

		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory is
		 * accurate.
		 */
		rte_write32(desc_refilled, droq->pkts_credit_reg);

		/* Ensure mmio write completes */
		rte_wmb();
		otx2_sdp_dbg("Refilled count = %d", desc_refilled);
	}

	/* Release the spin lock */
	rte_spinlock_unlock(&droq->lock);

	return pkts;

deq_fail:
	rte_spinlock_unlock(&droq->lock);
	return SDP_OQ_RECV_FAILED;
}
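
/* Usage sketch (illustrative, not part of this driver): the
 * application hands in an array of sdp_droq_pkt descriptors cast to
 * rte_rawdev_buf pointers and polls for completions. Names below are
 * hypothetical.
 *
 *	struct sdp_droq_pkt pkts[BURST];
 *	struct rte_rawdev_buf *bufs[BURST];
 *	int i, n;
 *
 *	for (i = 0; i < BURST; i++)
 *		bufs[i] = (struct rte_rawdev_buf *)&pkts[i];
 *
 *	n = rte_rawdev_dequeue_buffers(dev_id, bufs, BURST, NULL);
 *	for (i = 0; i < n; i++)
 *		process(pkts[i].data, pkts[i].len);
 */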