1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 #include <rte_mempool.h>
10 #include <ethdev_pci.h>
12 #include "otx_ep_common.h"
13 #include "otx_ep_vf.h"
14 #include "otx2_ep_vf.h"
15 #include "otx_ep_rxtx.h"
/*
 * otx_ep_dmazone_free(): release a DMA memzone previously reserved for a
 * queue ring.  The zone is looked up by name to confirm it is still
 * registered with the memzone subsystem before rte_memzone_free() is
 * attempted.  Failures are only logged; no status is returned.
 */
18 otx_ep_dmazone_free(const struct rte_memzone *mz)
20 const struct rte_memzone *mz_tmp;
/* NOTE(review): this log appears to sit on the mz == NULL error path
 * (guard condition elided from this view), yet it dereferences mz->name —
 * that would be a NULL dereference; verify and fix the guard/message.
 */
24 otx_ep_err("Memzone %s : NULL\n", mz->name);
/* Confirm the zone is actually registered before freeing it. */
28 mz_tmp = rte_memzone_lookup(mz->name);
30 otx_ep_err("Memzone %s Not Found\n", mz->name);
/* rte_memzone_free() returns 0 on success, negative errno on failure. */
34 ret = rte_memzone_free(mz);
36 otx_ep_err("Memzone free failed : ret = %d\n", ret);
39 /* Free IQ resources */
/*
 * otx_ep_delete_iqs(): tear down instruction queue iq_no — free its host
 * request-tracking list and ring memzone, release the queue structure,
 * clear the device slot and decrement the TX queue count.
 */
41 otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no)
43 struct otx_ep_instr_queue *iq;
45 iq = otx_ep->instr_queue[iq_no];
/* Presumably reached when the slot is NULL (queue never created) —
 * guard condition elided from this view; confirm.
 */
47 otx_ep_err("Invalid IQ[%d]\n", iq_no);
/* Free the per-descriptor request list used to reclaim TX buffers. */
51 rte_free(iq->req_list);
/* Release the DMA memzone backing the instruction ring. */
55 otx_ep_dmazone_free(iq->iq_mz);
59 rte_free(otx_ep->instr_queue[iq_no]);
/* Clear the slot so a stale pointer can never be reused. */
60 otx_ep->instr_queue[iq_no] = NULL;
62 otx_ep->nb_tx_queues--;
64 otx_ep_info("IQ[%d] is deleted\n", iq_no);
69 /* IQ initialization */
/*
 * otx_ep_init_instr_queue(): initialize instruction queue iq_no — reserve
 * the DMA ring memzone, validate the descriptor count, allocate the host
 * request list, reset the ring indices, record the queue in the device's
 * IQ mask and program the hardware IQ registers.
 * Returns nonzero on failure (exact error paths elided from this view).
 */
71 otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
72 unsigned int socket_id)
74 const struct otx_ep_config *conf;
75 struct otx_ep_instr_queue *iq;
79 iq = otx_ep->instr_queue[iq_no];
/* Ring size = per-instruction command size (32B or 64B) * descriptor
 * count.  NOTE(review): the assignment of `conf` is elided from this
 * view — presumably conf = otx_ep->conf; confirm.
 */
80 q_size = conf->iq.instr_type * num_descs;
82 /* IQ memory creation for Instruction submission to OCTEON TX2 */
83 iq->iq_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev,
84 "instr_queue", iq_no, q_size,
85 OTX_EP_PCI_RING_ALIGN,
87 if (iq->iq_mz == NULL) {
88 otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no);
/* Cache both the IOVA (for the hardware) and the virtual base. */
92 iq->base_addr_dma = iq->iq_mz->iova;
93 iq->base_addr = (uint8_t *)iq->iq_mz->addr;
/* NOTE(review): the power-of-2 validation runs only *after* the ring
 * memzone has already been reserved; validating num_descs first would
 * avoid a reserve-then-fail sequence.
 */
95 if (num_descs & (num_descs - 1)) {
96 otx_ep_err("IQ[%d] descs not in power of 2\n", iq_no);
100 iq->nb_desc = num_descs;
102 /* Create a IQ request list to hold requests that have been
103 * posted to OCTEON TX2. This list will be used for freeing the IQ
104 * data buffer(s) later once the OCTEON TX2 fetched the requests.
106 iq->req_list = rte_zmalloc_socket("request_list",
107 (iq->nb_desc * OTX_EP_IQREQ_LIST_SIZE),
110 if (iq->req_list == NULL) {
111 otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no);
115 otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n",
116 iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
/* Back-pointer to the owning device, then reset ring state. */
119 iq->otx_ep_dev = otx_ep;
122 iq->host_write_index = 0;
123 iq->otx_read_index = 0;
125 iq->instr_pending = 0;
/* Mark this queue active in the device's IQ bitmask. */
127 otx_ep->io_qmask.iq |= (1ull << iq_no);
129 /* Set 32B/64B mode for each input queue */
130 if (conf->iq.instr_type == 64)
131 otx_ep->io_qmask.iq64B |= (1ull << iq_no);
133 iq->iqcmd_64B = (conf->iq.instr_type == 64);
135 /* Set up IQ registers */
136 otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
/*
 * otx_ep_setup_iqs(): allocate and initialize instruction queue iq_no.
 * Allocates a zeroed queue structure, installs it in the device slot,
 * then delegates ring/register setup to otx_ep_init_instr_queue().
 * On init failure the cleanup path (tail of this function) deletes the
 * partially-built queue via otx_ep_delete_iqs().
 */
145 otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs,
146 unsigned int socket_id)
148 struct otx_ep_instr_queue *iq;
150 iq = (struct otx_ep_instr_queue *)rte_zmalloc("otx_ep_IQ", sizeof(*iq),
151 RTE_CACHE_LINE_SIZE);
/* Publish the queue in the device table before initializing it. */
155 otx_ep->instr_queue[iq_no] = iq;
157 if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) {
158 otx_ep_err("IQ init is failed\n");
161 otx_ep->nb_tx_queues++;
163 otx_ep_info("IQ[%d] is created.\n", iq_no);
/* Error cleanup: undo the allocation/installation done above. */
168 otx_ep_delete_iqs(otx_ep, iq_no);
/*
 * otx_ep_droq_reset_indices(): reset the software bookkeeping of an
 * output (RX) queue — refill position/count and pending-packet counters —
 * back to their empty-ring initial state.
 */
173 otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
177 droq->refill_idx = 0;
178 droq->refill_count = 0;
179 droq->last_pkt_count = 0;
180 droq->pkts_pending = 0;
/*
 * otx_ep_droq_destroy_ring_buffers(): free every receive mbuf attached to
 * the output queue's descriptor ring and clear its slot, then reset the
 * queue indices to the empty state.
 */
184 otx_ep_droq_destroy_ring_buffers(struct otx_ep_droq *droq)
188 for (idx = 0; idx < droq->nb_desc; idx++) {
189 if (droq->recv_buf_list[idx]) {
190 rte_pktmbuf_free(droq->recv_buf_list[idx]);
/* Clear the slot so teardown is idempotent. */
191 droq->recv_buf_list[idx] = NULL;
195 otx_ep_droq_reset_indices(droq);
198 /* Free OQs resources */
/*
 * otx_ep_delete_oqs(): tear down output queue oq_no — free its receive
 * buffers and buffer list, release the descriptor-ring memzone, zero and
 * free the queue structure, clear the device slot and decrement the RX
 * queue count.
 */
200 otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no)
202 struct otx_ep_droq *droq;
204 droq = otx_ep->droq[oq_no];
/* Presumably reached when the slot is NULL (queue never created) —
 * guard condition elided from this view; confirm.
 */
206 otx_ep_err("Invalid droq[%d]\n", oq_no);
/* Free all attached mbufs, then the buffer-pointer array itself. */
210 otx_ep_droq_destroy_ring_buffers(droq);
211 rte_free(droq->recv_buf_list);
212 droq->recv_buf_list = NULL;
214 if (droq->desc_ring_mz) {
215 otx_ep_dmazone_free(droq->desc_ring_mz);
216 droq->desc_ring_mz = NULL;
/* Scrub the structure before freeing so stale pointers cannot leak. */
219 memset(droq, 0, OTX_EP_DROQ_SIZE);
221 rte_free(otx_ep->droq[oq_no]);
222 otx_ep->droq[oq_no] = NULL;
224 otx_ep->nb_rx_queues--;
226 otx_ep_info("OQ[%d] is deleted\n", oq_no);
/*
 * otx_ep_droq_setup_ring_buffers(): populate the output-queue descriptor
 * ring — allocate one mbuf per descriptor from the queue's mempool, zero
 * the leading otx_ep_droq_info header in each buffer, and program the
 * descriptor with the buffer's default-data IOVA.  Finishes by resetting
 * the queue indices.  Allocation failures bump rx_alloc_failure (error
 * return path elided from this view).
 */
231 otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
233 struct otx_ep_droq_desc *desc_ring = droq->desc_ring;
234 struct otx_ep_droq_info *info;
235 struct rte_mbuf *buf;
238 for (idx = 0; idx < droq->nb_desc; idx++) {
239 buf = rte_pktmbuf_alloc(droq->mpool);
241 otx_ep_err("OQ buffer alloc failed\n");
/* Track allocation failures for the rx stats. */
242 droq->stats.rx_alloc_failure++;
246 droq->recv_buf_list[idx] = buf;
/* Zero the per-packet info header the hardware writes into. */
247 info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
248 memset(info, 0, sizeof(*info));
/* Hand the buffer's DMA address to the hardware descriptor. */
249 desc_ring[idx].buffer_ptr = rte_mbuf_data_iova_default(buf);
252 otx_ep_droq_reset_indices(droq);
257 /* OQ initialization */
/*
 * otx_ep_init_droq(): initialize output (RX) queue q_no — record the
 * descriptor count/buffer size, reserve the DMA descriptor-ring memzone,
 * allocate the receive-buffer pointer array, fill the ring with mbufs,
 * set the refill threshold, program the hardware OQ registers and mark
 * the queue active in the device's OQ mask.
 */
259 otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
260 uint32_t num_descs, uint32_t desc_size,
261 struct rte_mempool *mpool, unsigned int socket_id)
263 const struct otx_ep_config *conf = otx_ep->conf;
264 uint32_t c_refill_threshold;
265 struct otx_ep_droq *droq;
266 uint32_t desc_ring_size;
268 otx_ep_info("OQ[%d] Init start\n", q_no);
270 droq = otx_ep->droq[q_no];
/* NOTE(review): the assignment of droq->mpool (from the mpool argument)
 * is elided from this view but is required by the ring-buffer fill below;
 * confirm it is present.
 */
271 droq->otx_ep_dev = otx_ep;
275 droq->nb_desc = num_descs;
276 droq->buffer_size = desc_size;
/* Clamp the refill threshold to at least the configured minimum
 * (second RTE_MAX operand elided from this view).
 */
277 c_refill_threshold = RTE_MAX(conf->oq.refill_threshold,
280 /* OQ desc_ring set up */
281 desc_ring_size = droq->nb_desc * OTX_EP_DROQ_DESC_SIZE;
282 droq->desc_ring_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev, "droq",
283 q_no, desc_ring_size,
284 OTX_EP_PCI_RING_ALIGN,
287 if (droq->desc_ring_mz == NULL) {
288 otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no);
/* Cache the ring's IOVA (for hardware) and virtual address (for host). */
292 droq->desc_ring_dma = droq->desc_ring_mz->iova;
293 droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr;
295 otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
296 q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
297 otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc);
299 /* OQ buf_list set up */
300 droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
301 (droq->nb_desc * sizeof(struct rte_mbuf *)),
302 RTE_CACHE_LINE_SIZE, socket_id);
303 if (droq->recv_buf_list == NULL) {
304 otx_ep_err("OQ recv_buf_list alloc failed\n");
/* Populate every descriptor with a freshly allocated mbuf. */
308 if (otx_ep_droq_setup_ring_buffers(droq))
311 droq->refill_threshold = c_refill_threshold;
313 /* Set up OQ registers */
314 otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
/* Mark this queue active in the device's OQ bitmask. */
316 otx_ep->io_qmask.oq |= (1ull << q_no);
324 /* OQ configuration and setup */
/*
 * otx_ep_setup_oqs(): allocate and initialize output queue oq_no.
 * Allocates a zeroed droq structure, installs it in the device slot,
 * then delegates ring/buffer/register setup to otx_ep_init_droq().
 * On init failure the cleanup path (tail of this function) deletes the
 * partially-built queue via otx_ep_delete_oqs().
 */
326 otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
327 int desc_size, struct rte_mempool *mpool,
328 unsigned int socket_id)
330 struct otx_ep_droq *droq;
332 /* Allocate new droq. */
333 droq = (struct otx_ep_droq *)rte_zmalloc("otx_ep_OQ",
334 sizeof(*droq), RTE_CACHE_LINE_SIZE);
336 otx_ep_err("Droq[%d] Creation Failed\n", oq_no);
/* Publish the queue in the device table before initializing it. */
339 otx_ep->droq[oq_no] = droq;
341 if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool,
343 otx_ep_err("Droq[%d] Initialization failed\n", oq_no);
346 otx_ep_info("OQ[%d] is created.\n", oq_no);
348 otx_ep->nb_rx_queues++;
/* Error cleanup: undo the allocation/installation done above. */
353 otx_ep_delete_oqs(otx_ep, oq_no);