1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
11 #include <rte_bus_pci.h>
13 #include <rte_lcore.h>
14 #include <rte_mempool.h>
17 #include <rte_common.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
21 #include "otx2_common.h"
22 #include "otx2_ep_enqdeq.h"
24 /* IQ initialization */
/*
 * sdp_init_instr_queue(): set up instruction (input) queue 'iq_no' of
 * device 'sdpvf'.
 *
 * Reserves an IOVA-contiguous memzone for the instruction ring, checks
 * that the configured descriptor count is a power of two, allocates a
 * zeroed per-descriptor request list (used later to free command
 * buffers once the OCTEON TX2 has fetched them), resets the ring
 * indices, initializes the queue spinlocks, records the queue in the
 * device's IQ bitmask (and the 64B-mode bitmask when instr_type == 64),
 * and finally programs the IQ registers through fn_list.
 *
 * NOTE(review): the return type, local declarations and the
 * error-unwind paths of this function are elided from this chunk --
 * confirm against the full source before relying on failure behavior.
 */
26 sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no)
28 const struct sdp_config *conf;
29 struct sdp_instr_queue *iq;
33 iq = sdpvf->instr_queue[iq_no];
/* Ring size in bytes: one 'instr_type'-byte command per descriptor. */
34 q_size = conf->iq.instr_type * conf->num_iqdef_descs;
36 /* IQ memory creation for Instruction submission to OCTEON TX2 */
37 iq->iq_mz = rte_memzone_reserve_aligned("iqmz",
40 RTE_MEMZONE_IOVA_CONTIG,
42 if (iq->iq_mz == NULL) {
43 otx2_err("IQ[%d] memzone alloc failed", iq_no);
/* DMA (IOVA) and CPU virtual base addresses of the instruction ring. */
47 iq->base_addr_dma = iq->iq_mz->iova;
48 iq->base_addr = (uint8_t *)iq->iq_mz->addr;
/* Hardware requires a power-of-two descriptor count. */
50 if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) {
51 otx2_err("IQ[%d] descs not in power of 2", iq_no);
55 iq->nb_desc = conf->num_iqdef_descs;
57 /* Create a IQ request list to hold requests that have been
58 * posted to OCTEON TX2. This list will be used for freeing the IQ
59 * data buffer(s) later once the OCTEON TX2 fetched the requests.
61 iq->req_list = rte_zmalloc_socket("request_list",
62 (iq->nb_desc * SDP_IQREQ_LIST_SIZE),
65 if (iq->req_list == NULL) {
66 otx2_err("IQ[%d] req_list alloc failed", iq_no);
70 otx2_info("IQ[%d]: base: %p basedma: %lx count: %d",
71 iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
/* Start with an empty ring: host producer and device consumer at 0. */
77 iq->host_write_index = 0;
78 iq->otx_read_index = 0;
81 /* Initialize the spinlock for this instruction queue */
82 rte_spinlock_init(&iq->lock);
83 rte_spinlock_init(&iq->post_lock);
85 rte_atomic64_clear(&iq->iq_flush_running);
/* Mark this queue active in the device's IQ bitmask. */
87 sdpvf->io_qmask.iq |= (1ull << iq_no);
89 /* Set 32B/64B mode for each input queue */
90 if (conf->iq.instr_type == 64)
91 sdpvf->io_qmask.iq64B |= (1ull << iq_no);
93 iq->iqcmd_64B = (conf->iq.instr_type == 64);
95 /* Set up IQ registers */
96 sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no);
/*
 * sdp_setup_iqs(): allocate a zeroed sdp_instr_queue control structure
 * for slot 'iq_no', attach it to the device's instr_queue[] table and
 * initialize it via sdp_init_instr_queue().
 *
 * NOTE(review): the return type, the NULL check on the rte_zmalloc()
 * result and the error-unwind code are elided from this chunk --
 * confirm in the full source.
 */
106 sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
108 struct sdp_instr_queue *iq;
110 iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq),
111 RTE_CACHE_LINE_SIZE);
115 sdpvf->instr_queue[iq_no] = iq;
117 if (sdp_init_instr_queue(sdpvf, iq_no)) {
118 otx2_err("IQ init is failed");
121 otx2_info("IQ[%d] is created.", sdpvf->num_iqs);
/*
 * sdp_droq_reset_indices(): reset the output queue (DROQ) ring
 * bookkeeping to the empty state -- refill position and count at zero,
 * and no packets pending.
 *
 * NOTE(review): the return type and any additional index resets (e.g.
 * read/write indices) are elided from this chunk.
 */
133 sdp_droq_reset_indices(struct sdp_droq *droq)
137 droq->refill_idx = 0;
138 droq->refill_count = 0;
139 rte_atomic64_set(&droq->pkts_pending, 0);
/*
 * sdp_droq_setup_ring_buffers(): populate every DROQ descriptor with a
 * receive buffer drawn from the device's enqdeq mempool, pointing each
 * descriptor at the buffer's IOVA and at its per-descriptor info-list
 * entry, then reset the ring indices.
 *
 * NOTE(review): the mempool-get failure branch body and the function's
 * return statements are elided from this chunk.
 */
143 sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf,
144 struct sdp_droq *droq)
146 struct sdp_droq_desc *desc_ring = droq->desc_ring;
150 for (idx = 0; idx < droq->nb_desc; idx++) {
151 rte_mempool_get(sdpvf->enqdeq_mpool, &buf);
153 otx2_err("OQ buffer alloc failed");
154 /* sdp_droq_destroy_ring_buffers(droq);*/
158 droq->recv_buf_list[idx].buffer = buf;
159 droq->info_list[idx].length = 0;
161 /* Map ring buffers into memory */
/* info_ptr: DMA address of this descriptor's info-list entry. */
162 desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma +
163 (idx * SDP_DROQ_INFO_SIZE));
/* buffer_ptr: IOVA the device DMAs received data into. */
165 desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf);
168 sdp_droq_reset_indices(droq);
/*
 * sdp_alloc_info_buffer(): reserve an IOVA-contiguous, cache-line
 * aligned memzone holding one SDP_DROQ_INFO_SIZE entry per OQ
 * descriptor; record its DMA address, allocated length and virtual
 * base in 'droq' and return the memzone's virtual address.
 *
 * NOTE(review): the return type and the early-return on allocation
 * failure (line after the NULL check) are elided from this chunk --
 * callers treat a NULL return as failure (see sdp_init_droq()).
 */
174 sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused,
175 struct sdp_droq *droq)
177 droq->info_mz = rte_memzone_reserve_aligned("OQ_info_list",
178 (droq->nb_desc * SDP_DROQ_INFO_SIZE),
180 RTE_MEMZONE_IOVA_CONTIG,
181 RTE_CACHE_LINE_SIZE);
183 if (droq->info_mz == NULL)
186 droq->info_list_dma = droq->info_mz->iova;
187 droq->info_alloc_size = droq->info_mz->len;
188 droq->info_base_addr = (size_t)droq->info_mz->addr;
190 return droq->info_mz->addr;
193 /* OQ initialization */
/*
 * sdp_init_droq(): initialize output queue (DROQ) 'q_no': size the
 * queue from the device-wide default config, reserve the
 * IOVA-contiguous descriptor ring memzone, allocate the per-descriptor
 * info list and the receive-buffer list, fill the ring with mempool
 * buffers, then program the OQ registers through fn_list and mark the
 * queue active in io_qmask.oq.
 *
 * NOTE(review): the return type and the error-unwind paths are elided
 * from this chunk -- confirm the cleanup ordering in the full source.
 */
195 sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no)
197 const struct sdp_config *conf = sdpvf->conf;
198 uint32_t c_refill_threshold;
199 uint32_t desc_ring_size;
200 struct sdp_droq *droq;
202 otx2_info("OQ[%d] Init start", q_no);
204 droq = sdpvf->droq[q_no];
205 droq->sdp_dev = sdpvf;
/* Queue geometry and refill policy come from the default config. */
208 c_refill_threshold = conf->oq.refill_threshold;
209 droq->nb_desc = conf->num_oqdef_descs;
210 droq->buffer_size = conf->oqdef_buf_size;
212 /* OQ desc_ring set up */
213 desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE;
214 droq->desc_ring_mz = rte_memzone_reserve_aligned("sdp_oqmz",
217 RTE_MEMZONE_IOVA_CONTIG,
218 RTE_CACHE_LINE_SIZE);
220 if (droq->desc_ring_mz == NULL) {
221 otx2_err("OQ:%d desc_ring allocation failed", q_no);
/* DMA (IOVA) and CPU virtual addresses of the descriptor ring. */
225 droq->desc_ring_dma = droq->desc_ring_mz->iova;
226 droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr;
228 otx2_sdp_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx",
229 q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
230 otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc);
233 /* OQ info_list set up */
234 droq->info_list = sdp_alloc_info_buffer(sdpvf, droq);
235 if (droq->info_list == NULL) {
236 otx2_err("memory allocation failed for OQ[%d] info_list", q_no);
240 /* OQ buf_list set up */
241 droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
242 (droq->nb_desc * SDP_DROQ_RECVBUF_SIZE),
243 RTE_CACHE_LINE_SIZE, rte_socket_id());
244 if (droq->recv_buf_list == NULL) {
245 otx2_err("OQ recv_buf_list alloc failed");
249 if (sdp_droq_setup_ring_buffers(sdpvf, droq))
252 droq->refill_threshold = c_refill_threshold;
253 rte_spinlock_init(&droq->lock);
256 /* Set up OQ registers */
257 sdpvf->fn_list.setup_oq_regs(sdpvf, q_no);
/* Mark this queue active in the device's OQ bitmask. */
259 sdpvf->io_qmask.oq |= (1ull << q_no);
267 /* OQ configuration and setup */
/*
 * sdp_setup_oqs(): allocate a zeroed sdp_droq control structure for
 * slot 'oq_no', attach it to the device's droq[] table and initialize
 * it via sdp_init_droq().
 *
 * NOTE(review): the return type, the NULL check guarding the
 * "Creation Failed" log and the error-unwind code are elided from this
 * chunk -- confirm in the full source.
 */
269 sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
271 struct sdp_droq *droq;
273 /* Allocate new droq. */
274 droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ",
275 sizeof(*droq), RTE_CACHE_LINE_SIZE);
277 otx2_err("Droq[%d] Creation Failed", oq_no);
280 sdpvf->droq[oq_no] = droq;
282 if (sdp_init_droq(sdpvf, oq_no)) {
283 otx2_err("Droq[%d] Initialization failed", oq_no);
286 otx2_info("OQ[%d] is created.", oq_no);