/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_net.h>
#include <ethdev_pci.h>

#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

/* SDP_LENGTH_S specifies packet length and is of 8-byte size */
#define INFO_SIZE 8
#define DROQ_REFILL_THRESHOLD 16

static void
otx_ep_dmazone_free(const struct rte_memzone *mz)
{
	const struct rte_memzone *mz_tmp;
	int ret = 0;

	if (mz == NULL) {
		otx_ep_err("Memzone: NULL\n");
		return;
	}

	mz_tmp = rte_memzone_lookup(mz->name);
	if (mz_tmp == NULL) {
		otx_ep_err("Memzone %s Not Found\n", mz->name);
		return;
	}

	ret = rte_memzone_free(mz);
	if (ret)
		otx_ep_err("Memzone free failed : ret = %d\n", ret);
}

/* Free IQ resources */
int
otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
	struct otx_ep_instr_queue *iq;

	iq = otx_ep->instr_queue[iq_no];
	if (iq == NULL) {
		otx_ep_err("Invalid IQ[%d]\n", iq_no);
		return -EINVAL;
	}

	rte_free(iq->req_list);
	iq->req_list = NULL;

	if (iq->iq_mz) {
		otx_ep_dmazone_free(iq->iq_mz);
		iq->iq_mz = NULL;
	}

	rte_free(otx_ep->instr_queue[iq_no]);
	otx_ep->instr_queue[iq_no] = NULL;

	otx_ep->nb_tx_queues--;

	otx_ep_info("IQ[%d] is deleted\n", iq_no);
	return 0;
}

/* IQ initialization */
static int
otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
			unsigned int socket_id)
{
	const struct otx_ep_config *conf;
	struct otx_ep_instr_queue *iq;
	uint32_t q_size;

	conf = otx_ep->conf;
	iq = otx_ep->instr_queue[iq_no];
	q_size = conf->iq.instr_type * num_descs;

	/* IQ memory creation for Instruction submission to OCTEON TX2 */
	iq->iq_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev,
					     "instr_queue", iq_no, q_size,
					     OTX_EP_PCI_RING_ALIGN,
					     socket_id);
	if (iq->iq_mz == NULL) {
		otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no);
		return -ENOMEM;
	}

	iq->base_addr_dma = iq->iq_mz->iova;
	iq->base_addr = (uint8_t *)iq->iq_mz->addr;

	if (num_descs & (num_descs - 1)) {
		otx_ep_err("IQ[%d] descs not in power of 2\n", iq_no);
		return -EINVAL;
	}

	iq->nb_desc = num_descs;

	/* Create an IQ request list to hold requests that have been
	 * posted to OCTEON TX2. This list will be used for freeing the IQ
	 * data buffer(s) later once OCTEON TX2 has fetched the requests.
	 */
	iq->req_list = rte_zmalloc_socket("request_list",
					  (iq->nb_desc * OTX_EP_IQREQ_LIST_SIZE),
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (iq->req_list == NULL) {
		otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no);
		return -ENOMEM;
	}

	otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n",
		    iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
		    iq->nb_desc);

	iq->otx_ep_dev = otx_ep;
	iq->host_write_index = 0;
	iq->otx_read_index = 0;
	iq->instr_pending = 0;

	otx_ep->io_qmask.iq |= (1ull << iq_no);

	/* Set 32B/64B mode for each input queue */
	if (conf->iq.instr_type == 64)
		otx_ep->io_qmask.iq64B |= (1ull << iq_no);

	iq->iqcmd_64B = (conf->iq.instr_type == 64);

	/* Set up IQ registers */
	otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);

	return 0;
}
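
/* IQ configuration and setup */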
int
otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs,
		 unsigned int socket_id)
{
	struct otx_ep_instr_queue *iq;

	iq = (struct otx_ep_instr_queue *)rte_zmalloc("otx_ep_IQ", sizeof(*iq),
						      RTE_CACHE_LINE_SIZE);
	if (iq == NULL)
		return -ENOMEM;

	otx_ep->instr_queue[iq_no] = iq;

	if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) {
		otx_ep_err("IQ init failed\n");
		goto delete_IQ;
	}
	otx_ep->nb_tx_queues++;

	otx_ep_info("IQ[%d] is created.\n", iq_no);

	return 0;

delete_IQ:
	otx_ep_delete_iqs(otx_ep, iq_no);
	return -ENOMEM;
}
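
/* Reset DROQ indices and pending-packet counters */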
static void
otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	droq->last_pkt_count = 0;
	droq->pkts_pending = 0;
}
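
/* Free all receive buffers currently posted to the DROQ ring */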
static void
otx_ep_droq_destroy_ring_buffers(struct otx_ep_droq *droq)
{
	uint32_t idx;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		if (droq->recv_buf_list[idx]) {
			rte_pktmbuf_free(droq->recv_buf_list[idx]);
			droq->recv_buf_list[idx] = NULL;
		}
	}

	otx_ep_droq_reset_indices(droq);
}

/* Free OQ resources */
int
otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
	struct otx_ep_droq *droq;

	droq = otx_ep->droq[oq_no];
	if (droq == NULL) {
		otx_ep_err("Invalid droq[%d]\n", oq_no);
		return -EINVAL;
	}

	otx_ep_droq_destroy_ring_buffers(droq);
	rte_free(droq->recv_buf_list);
	droq->recv_buf_list = NULL;

	if (droq->desc_ring_mz) {
		otx_ep_dmazone_free(droq->desc_ring_mz);
		droq->desc_ring_mz = NULL;
	}

	memset(droq, 0, OTX_EP_DROQ_SIZE);

	rte_free(otx_ep->droq[oq_no]);
	otx_ep->droq[oq_no] = NULL;

	otx_ep->nb_rx_queues--;

	otx_ep_info("OQ[%d] is deleted\n", oq_no);
	return 0;
}
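
/* Allocate mbufs from the mempool and post them to the DROQ descriptor ring */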
static int
otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
{
	struct otx_ep_droq_desc *desc_ring = droq->desc_ring;
	struct otx_ep_droq_info *info;
	struct rte_mbuf *buf;
	uint32_t idx;

	for (idx = 0; idx < droq->nb_desc; idx++) {
		buf = rte_pktmbuf_alloc(droq->mpool);
		if (buf == NULL) {
			otx_ep_err("OQ buffer alloc failed\n");
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[idx] = buf;
		info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
		memset(info, 0, sizeof(*info));
		desc_ring[idx].buffer_ptr = rte_mbuf_data_iova_default(buf);
	}

	otx_ep_droq_reset_indices(droq);

	return 0;
}

/* OQ initialization */
static int
otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
		 uint32_t num_descs, uint32_t desc_size,
		 struct rte_mempool *mpool, unsigned int socket_id)
{
	const struct otx_ep_config *conf = otx_ep->conf;
	uint32_t c_refill_threshold;
	struct otx_ep_droq *droq;
	uint32_t desc_ring_size;

	otx_ep_info("OQ[%d] Init start\n", q_no);

	droq = otx_ep->droq[q_no];
	droq->otx_ep_dev = otx_ep;
	droq->q_no = q_no;
	droq->mpool = mpool;

	droq->nb_desc = num_descs;
	droq->buffer_size = desc_size;
	c_refill_threshold = RTE_MAX(conf->oq.refill_threshold,
				     droq->nb_desc / 2);

	/* OQ desc_ring set up */
	desc_ring_size = droq->nb_desc * OTX_EP_DROQ_DESC_SIZE;
	droq->desc_ring_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev, "droq",
						      q_no, desc_ring_size,
						      OTX_EP_PCI_RING_ALIGN,
						      socket_id);
	if (droq->desc_ring_mz == NULL) {
		otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no);
		return -ENOMEM;
	}

	droq->desc_ring_dma = droq->desc_ring_mz->iova;
	droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr;

	otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
		   q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
	otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc);

	/* OQ buf_list set up */
	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
				(droq->nb_desc * sizeof(struct rte_mbuf *)),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (droq->recv_buf_list == NULL) {
		otx_ep_err("OQ recv_buf_list alloc failed\n");
		return -ENOMEM;
	}

	if (otx_ep_droq_setup_ring_buffers(droq))
		return -ENOMEM;

	droq->refill_threshold = c_refill_threshold;

	/* Set up OQ registers */
	otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);

	otx_ep->io_qmask.oq |= (1ull << q_no);

	return 0;
}

/* OQ configuration and setup */
int
otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
		 int desc_size, struct rte_mempool *mpool,
		 unsigned int socket_id)
{
	struct otx_ep_droq *droq;

	/* Allocate new droq. */
	droq = (struct otx_ep_droq *)rte_zmalloc("otx_ep_OQ",
						 sizeof(*droq), RTE_CACHE_LINE_SIZE);
	if (droq == NULL) {
		otx_ep_err("Droq[%d] Creation Failed\n", oq_no);
		return -ENOMEM;
	}
	otx_ep->droq[oq_no] = droq;

	if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool,
			     socket_id)) {
		otx_ep_err("Droq[%d] Initialization failed\n", oq_no);
		goto delete_OQ;
	}
	otx_ep_info("OQ[%d] is created.\n", oq_no);

	otx_ep->nb_rx_queues++;

	return 0;

delete_OQ:
	otx_ep_delete_oqs(otx_ep, oq_no);
	return -ENOMEM;
}
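
/* Refill consumed DROQ descriptors with fresh mbufs and return the number of
 * descriptors refilled.
 */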
static uint32_t
otx_ep_droq_refill(struct otx_ep_droq *droq)
{
	struct otx_ep_droq_desc *desc_ring;
	struct otx_ep_droq_info *info;
	struct rte_mbuf *buf = NULL;
	uint32_t desc_refilled = 0;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (droq->recv_buf_list[droq->refill_idx] != NULL)
			break;

		buf = rte_pktmbuf_alloc(droq->mpool);
		/* If a buffer could not be allocated, no point in
		 * continuing further.
		 */
		if (buf == NULL) {
			droq->stats.rx_alloc_failure++;
			break;
		}
		info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
		memset(info, 0, sizeof(*info));

		droq->recv_buf_list[droq->refill_idx] = buf;
		desc_ring[droq->refill_idx].buffer_ptr =
					rte_mbuf_data_iova_default(buf);

		droq->refill_idx = otx_ep_incr_index(droq->refill_idx, 1,
						     droq->nb_desc);

		desc_refilled++;
		droq->refill_count--;
	}

	return desc_refilled;
}
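
/* Read a single packet (possibly spanning multiple ring buffers) from the DROQ
 * at the current read index.
 */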
static struct rte_mbuf *
otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
			struct otx_ep_droq *droq, int next_fetch)
{
	volatile struct otx_ep_droq_info *info;
	struct rte_mbuf *droq_pkt2 = NULL;
	struct rte_mbuf *droq_pkt = NULL;
	struct rte_net_hdr_lens hdr_lens;
	struct otx_ep_droq_info *info2;
	uint64_t total_pkt_len;
	uint32_t pkt_len = 0;
	int next_idx;

	droq_pkt = droq->recv_buf_list[droq->read_idx];
	droq_pkt2 = droq->recv_buf_list[droq->read_idx];
	info = rte_pktmbuf_mtod(droq_pkt, struct otx_ep_droq_info *);
	/* make sure info is available */
	rte_rmb();
	if (unlikely(!info->length)) {
		int retry = OTX_EP_MAX_DELAYED_PKT_RETRIES;
		/* otx_ep_dbg("OCTEON DROQ[%d]: read_idx: %d; Data not ready "
		 * "yet, Retry; pending=%lu\n", droq->q_no, droq->read_idx,
		 * droq->pkts_pending);
		 */
		droq->stats.pkts_delayed_data++;
		while (retry && !info->length)
			retry--;
		if (!retry && !info->length) {
			otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
				   droq->q_no, droq->read_idx);
			/* May be zero length packet; drop it */
			rte_pktmbuf_free(droq_pkt);
			droq->recv_buf_list[droq->read_idx] = NULL;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->stats.dropped_zlp++;
			droq->refill_count++;
			return NULL;
		}
	}
	if (next_fetch) {
		next_idx = otx_ep_incr_index(droq->read_idx, 1, droq->nb_desc);
		droq_pkt2 = droq->recv_buf_list[next_idx];
		info2 = rte_pktmbuf_mtod(droq_pkt2, struct otx_ep_droq_info *);
		rte_prefetch_non_temporal((const void *)info2);
	}

	info->length = rte_bswap64(info->length);
	/* Deduce the actual data size */
	total_pkt_len = info->length + INFO_SIZE;
	if (total_pkt_len <= droq->buffer_size) {
		info->length -= OTX_EP_RH_SIZE;
		droq_pkt = droq->recv_buf_list[droq->read_idx];
		if (likely(droq_pkt != NULL)) {
			droq_pkt->data_off += OTX_EP_DROQ_INFO_SIZE;
			/* otx_ep_dbg("OQ: pkt_len[%ld], buffer_size %d\n",
			 * (long)info->length, droq->buffer_size);
			 */
			pkt_len = (uint32_t)info->length;
			droq_pkt->pkt_len = pkt_len;
			droq_pkt->data_len = pkt_len;
			droq_pkt->port = otx_ep->port_id;
			droq->recv_buf_list[droq->read_idx] = NULL;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->refill_count++;
		}
	} else {
		struct rte_mbuf *first_buf = NULL;
		struct rte_mbuf *last_buf = NULL;

		while (pkt_len < total_pkt_len) {
			int cpy_len = 0;

			cpy_len = ((pkt_len + droq->buffer_size) >
				   total_pkt_len)
				   ? ((uint32_t)total_pkt_len - pkt_len)
				   : droq->buffer_size;

			droq_pkt = droq->recv_buf_list[droq->read_idx];
			droq->recv_buf_list[droq->read_idx] = NULL;

			if (likely(droq_pkt != NULL)) {
				/* Note the first seg */
				if (!pkt_len)
					first_buf = droq_pkt;

				droq_pkt->port = otx_ep->port_id;
				if (!pkt_len) {
					droq_pkt->data_off +=
						OTX_EP_DROQ_INFO_SIZE;
					droq_pkt->pkt_len =
						cpy_len - OTX_EP_DROQ_INFO_SIZE;
					droq_pkt->data_len =
						cpy_len - OTX_EP_DROQ_INFO_SIZE;
				} else {
					droq_pkt->pkt_len = cpy_len;
					droq_pkt->data_len = cpy_len;
				}

				if (pkt_len) {
					first_buf->nb_segs++;
					first_buf->pkt_len += droq_pkt->pkt_len;
				}

				if (last_buf)
					last_buf->next = droq_pkt;

				last_buf = droq_pkt;
			} else {
				otx_ep_err("no buf\n");
			}

			pkt_len += cpy_len;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->refill_count++;
		}
		droq_pkt = first_buf;
	}

	droq_pkt->packet_type = rte_net_get_ptype(droq_pkt, &hdr_lens,
						  RTE_PTYPE_ALL_MASK);
	droq_pkt->l2_len = hdr_lens.l2_len;
	droq_pkt->l3_len = hdr_lens.l3_len;
	droq_pkt->l4_len = hdr_lens.l4_len;

	if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
		rte_pktmbuf_free(droq_pkt);
		return NULL;
	}

	if (droq_pkt->nb_segs > 1 &&
	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
		rte_pktmbuf_free(droq_pkt);
		return NULL;
	}

	return droq_pkt;
}
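
/* Read the count of newly arrived OQ packets from the pkts_sent register and
 * acknowledge it by writing the same value back.
 */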
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
	volatile uint64_t pkt_count;
	uint32_t new_pkts;

	/* Latest available OQ packets */
	pkt_count = rte_read32(droq->pkts_sent_reg);
	rte_write32(pkt_count, droq->pkts_sent_reg);
	new_pkts = pkt_count;
	droq->pkts_pending += new_pkts;

	return new_pkts;
}

/* Check for response arrival from OCTEON TX2.
 * Returns the number of packets received.
 */
uint16_t
otx_ep_recv_pkts(void *rx_queue,
		 struct rte_mbuf **rx_pkts,
		 uint16_t budget)
{
	struct otx_ep_droq *droq = rx_queue;
	struct otx_ep_device *otx_ep;
	struct rte_mbuf *oq_pkt;
	uint32_t pkts = 0;
	uint32_t new_pkts = 0;
	int next_fetch;

	otx_ep = droq->otx_ep_dev;

	if (droq->pkts_pending > budget) {
		new_pkts = budget;
	} else {
		new_pkts = droq->pkts_pending;
		new_pkts += otx_ep_check_droq_pkts(droq);
		if (new_pkts > budget)
			new_pkts = budget;
	}

	if (!new_pkts)
		goto update_credit; /* No pkts at this moment */

	for (pkts = 0; pkts < new_pkts; pkts++) {
		/* Push the received pkt to application */
		next_fetch = (pkts == new_pkts - 1) ? 0 : 1;
		oq_pkt = otx_ep_droq_read_packet(otx_ep, droq, next_fetch);
		if (!oq_pkt) {
			otx_ep_err("DROQ read pkt failed pending %" PRIu64
				   " last_pkt_count %" PRIu64 " new_pkts %d.\n",
				   droq->pkts_pending, droq->last_pkt_count,
				   new_pkts);
			droq->pkts_pending -= pkts;
			droq->stats.rx_err++;
			goto finish;
		}
		rx_pkts[pkts] = oq_pkt;
		droq->stats.pkts_received++;
		droq->stats.bytes_received += oq_pkt->pkt_len;
	}
	droq->pkts_pending -= pkts;

	/* Refill DROQ buffers */
update_credit:
	if (droq->refill_count >= DROQ_REFILL_THRESHOLD) {
		int desc_refilled = otx_ep_droq_refill(droq);

		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory is
		 * accurate.
		 */
		rte_wmb();
		rte_write32(desc_refilled, droq->pkts_credit_reg);
	} else {
		/*
		 * SDP output goes into DROP state when output doorbell count
		 * goes below drop count. When the doorbell count is written
		 * with a value greater than drop count, SDP output should come
		 * out of DROP state. Due to a race condition this is not
		 * happening. Writing the doorbell register with 0 again may
		 * make SDP output come out of this state.
		 */
		rte_write32(0, droq->pkts_credit_reg);
	}
finish:
	return pkts;
}