1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
8 #include <rte_mempool.h>
12 #include <ethdev_pci.h>
14 #include "otx_ep_common.h"
15 #include "otx_ep_vf.h"
16 #include "otx2_ep_vf.h"
17 #include "otx_ep_rxtx.h"
19 /* SDP_LENGTH_S specifies packet length and is of 8-byte size */
21 #define DROQ_REFILL_THRESHOLD 16
/* Release a DMA memzone previously reserved for an IQ/OQ ring.
 * The zone is looked up by name before freeing so that a NULL or
 * stale pointer is logged rather than handed to rte_memzone_free().
 */
24 otx_ep_dmazone_free(const struct rte_memzone *mz)
26 const struct rte_memzone *mz_tmp;
/* Guard: nothing to free. */
30 otx_ep_err("Memzone: NULL\n");
/* Confirm the zone is still registered with the EAL. */
34 mz_tmp = rte_memzone_lookup(mz->name);
36 otx_ep_err("Memzone %s Not Found\n", mz->name);
40 ret = rte_memzone_free(mz);
42 otx_ep_err("Memzone free failed : ret = %d\n", ret);
45 /* Free IQ resources */
/* Tear down instruction (Tx) queue @iq_no: free its request list,
 * its ring memzone and the queue structure itself, then decrement
 * the device's Tx queue count.
 */
47 otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no)
49 struct otx_ep_instr_queue *iq;
51 iq = otx_ep->instr_queue[iq_no];
/* Queue was never created (or already deleted). */
53 otx_ep_err("Invalid IQ[%d]\n", iq_no);
57 rte_free(iq->req_list);
61 otx_ep_dmazone_free(iq->iq_mz);
/* Free the queue struct and clear the slot so later lookups fail fast. */
65 rte_free(otx_ep->instr_queue[iq_no]);
66 otx_ep->instr_queue[iq_no] = NULL;
68 otx_ep->nb_tx_queues--;
70 otx_ep_info("IQ[%d] is deleted\n", iq_no);
75 /* IQ initialization */
/* Initialize instruction queue @iq_no: reserve the DMA ring,
 * allocate the per-slot request bookkeeping list, reset indices
 * and program the queue registers via the per-chip fn_list hook.
 */
77 otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
78 unsigned int socket_id)
80 const struct otx_ep_config *conf;
81 struct otx_ep_instr_queue *iq;
85 iq = otx_ep->instr_queue[iq_no];
/* Ring size = command size (32B or 64B, from config) * descriptor count. */
86 q_size = conf->iq.instr_type * num_descs;
88 /* IQ memory creation for Instruction submission to OCTEON 9 */
89 iq->iq_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev,
90 "instr_queue", iq_no, q_size,
91 OTX_EP_PCI_RING_ALIGN,
93 if (iq->iq_mz == NULL) {
94 otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no);
98 iq->base_addr_dma = iq->iq_mz->iova;
99 iq->base_addr = (uint8_t *)iq->iq_mz->addr;
/* Descriptor count must be a power of two (index math uses masking).
 * NOTE(review): this check runs after the memzone reserve; verify the
 * (elided) error path frees iq->iq_mz on this failure.
 */
101 if (num_descs & (num_descs - 1)) {
102 otx_ep_err("IQ[%d] descs not in power of 2\n", iq_no);
106 iq->nb_desc = num_descs;
108 /* Create a IQ request list to hold requests that have been
109 * posted to OCTEON 9. This list will be used for freeing the IQ
110 * data buffer(s) later once the OCTEON 9 fetched the requests.
112 iq->req_list = rte_zmalloc_socket("request_list",
113 (iq->nb_desc * OTX_EP_IQREQ_LIST_SIZE),
116 if (iq->req_list == NULL) {
117 otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no);
121 otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n",
122 iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
125 iq->otx_ep_dev = otx_ep;
/* Reset producer/consumer indices; queue starts empty. */
128 iq->host_write_index = 0;
129 iq->otx_read_index = 0;
131 iq->instr_pending = 0;
/* Mark this queue active in the device's IQ bitmask. */
133 otx_ep->io_qmask.iq |= (1ull << iq_no);
135 /* Set 32B/64B mode for each input queue */
136 if (conf->iq.instr_type == 64)
137 otx_ep->io_qmask.iq64B |= (1ull << iq_no);
139 iq->iqcmd_64B = (conf->iq.instr_type == 64);
141 /* Set up IQ registers */
142 otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
/* Allocate and initialize instruction queue @iq_no.
 * On init failure, otx_ep_delete_iqs() (at the elided error label)
 * undoes the partial setup.
 */
151 otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs,
152 unsigned int socket_id)
154 struct otx_ep_instr_queue *iq;
156 iq = (struct otx_ep_instr_queue *)rte_zmalloc("otx_ep_IQ", sizeof(*iq),
157 RTE_CACHE_LINE_SIZE);
/* Publish the queue before init so init (and cleanup) can find it. */
161 otx_ep->instr_queue[iq_no] = iq;
163 if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) {
164 otx_ep_err("IQ init is failed\n");
167 otx_ep->nb_tx_queues++;
169 otx_ep_info("IQ[%d] is created.\n", iq_no);
/* Error path: full teardown of whatever was set up. */
174 otx_ep_delete_iqs(otx_ep, iq_no);
/* Reset all DROQ software counters/indices to the empty-queue state. */
179 otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
183 droq->refill_idx = 0;
184 droq->refill_count = 0;
185 droq->last_pkt_count = 0;
186 droq->pkts_pending = 0;
/* Free every receive mbuf still posted to the DROQ ring and reset
 * the queue indices. Safe to call on a partially populated ring
 * (NULL slots are skipped).
 */
190 otx_ep_droq_destroy_ring_buffers(struct otx_ep_droq *droq)
194 for (idx = 0; idx < droq->nb_desc; idx++) {
195 if (droq->recv_buf_list[idx]) {
196 rte_pktmbuf_free(droq->recv_buf_list[idx]);
197 droq->recv_buf_list[idx] = NULL;
201 otx_ep_droq_reset_indices(droq);
204 /* Free OQs resources */
/* Tear down output (Rx) queue @oq_no: free posted mbufs, the
 * recv_buf_list array, the descriptor-ring memzone and the queue
 * structure, then decrement the device's Rx queue count.
 */
206 otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no)
208 struct otx_ep_droq *droq;
210 droq = otx_ep->droq[oq_no];
/* Queue was never created (or already deleted). */
212 otx_ep_err("Invalid droq[%d]\n", oq_no);
216 otx_ep_droq_destroy_ring_buffers(droq);
217 rte_free(droq->recv_buf_list);
218 droq->recv_buf_list = NULL;
220 if (droq->desc_ring_mz) {
221 otx_ep_dmazone_free(droq->desc_ring_mz);
222 droq->desc_ring_mz = NULL;
/* Scrub the struct before freeing so stale pointers can't be reused. */
225 memset(droq, 0, OTX_EP_DROQ_SIZE);
227 rte_free(otx_ep->droq[oq_no]);
228 otx_ep->droq[oq_no] = NULL;
230 otx_ep->nb_rx_queues--;
232 otx_ep_info("OQ[%d] is deleted\n", oq_no);
/* Populate the DROQ ring: allocate one mbuf per descriptor, zero its
 * leading info header (hardware writes the packet length there) and
 * program the descriptor with the buffer's IOVA.
 */
237 otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
239 struct otx_ep_droq_desc *desc_ring = droq->desc_ring;
240 struct otx_ep_droq_info *info;
241 struct rte_mbuf *buf;
244 for (idx = 0; idx < droq->nb_desc; idx++) {
245 buf = rte_pktmbuf_alloc(droq->mpool);
247 otx_ep_err("OQ buffer alloc failed\n");
248 droq->stats.rx_alloc_failure++;
252 droq->recv_buf_list[idx] = buf;
/* Clear the info header so a stale length is never mistaken for data. */
253 info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
254 memset(info, 0, sizeof(*info));
255 desc_ring[idx].buffer_ptr = rte_mbuf_data_iova_default(buf);
258 otx_ep_droq_reset_indices(droq);
263 /* OQ initialization */
/* Initialize output queue @q_no: reserve the descriptor-ring DMA
 * zone, allocate the mbuf tracking array, post receive buffers and
 * program the OQ registers via the per-chip fn_list hook.
 */
265 otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
266 uint32_t num_descs, uint32_t desc_size,
267 struct rte_mempool *mpool, unsigned int socket_id)
269 const struct otx_ep_config *conf = otx_ep->conf;
270 uint32_t c_refill_threshold;
271 struct otx_ep_droq *droq;
272 uint32_t desc_ring_size;
274 otx_ep_info("OQ[%d] Init start\n", q_no);
276 droq = otx_ep->droq[q_no];
277 droq->otx_ep_dev = otx_ep;
281 droq->nb_desc = num_descs;
282 droq->buffer_size = desc_size;
/* Refill threshold: configured value, clamped below by an (elided) floor. */
283 c_refill_threshold = RTE_MAX(conf->oq.refill_threshold,
286 /* OQ desc_ring set up */
287 desc_ring_size = droq->nb_desc * OTX_EP_DROQ_DESC_SIZE;
288 droq->desc_ring_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev, "droq",
289 q_no, desc_ring_size,
290 OTX_EP_PCI_RING_ALIGN,
293 if (droq->desc_ring_mz == NULL) {
294 otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no);
298 droq->desc_ring_dma = droq->desc_ring_mz->iova;
299 droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr;
301 otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
302 q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
303 otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc);
305 /* OQ buf_list set up */
306 droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
307 (droq->nb_desc * sizeof(struct rte_mbuf *)),
308 RTE_CACHE_LINE_SIZE, socket_id);
309 if (droq->recv_buf_list == NULL) {
310 otx_ep_err("OQ recv_buf_list alloc failed\n");
/* Post an mbuf into every descriptor before enabling the queue. */
314 if (otx_ep_droq_setup_ring_buffers(droq))
317 droq->refill_threshold = c_refill_threshold;
319 /* Set up OQ registers */
320 otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
/* Mark this queue active in the device's OQ bitmask. */
322 otx_ep->io_qmask.oq |= (1ull << q_no);
330 /* OQ configuration and setup */
/* Allocate and initialize output queue @oq_no.
 * On init failure, otx_ep_delete_oqs() (at the elided error label)
 * undoes the partial setup.
 */
332 otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
333 int desc_size, struct rte_mempool *mpool,
334 unsigned int socket_id)
336 struct otx_ep_droq *droq;
338 /* Allocate new droq. */
339 droq = (struct otx_ep_droq *)rte_zmalloc("otx_ep_OQ",
340 sizeof(*droq), RTE_CACHE_LINE_SIZE);
342 otx_ep_err("Droq[%d] Creation Failed\n", oq_no);
/* Publish the queue before init so init (and cleanup) can find it. */
345 otx_ep->droq[oq_no] = droq;
347 if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool,
349 otx_ep_err("Droq[%d] Initialization failed\n", oq_no);
352 otx_ep_info("OQ[%d] is created.\n", oq_no);
354 otx_ep->nb_rx_queues++;
/* Error path: full teardown of whatever was set up. */
359 otx_ep_delete_oqs(otx_ep, oq_no);
/* Free the buffer recorded at IQ slot @idx, once the device has
 * consumed the corresponding command. How it is freed depends on the
 * request type recorded at enqueue time, then the slot is cleared.
 */
364 otx_ep_iqreq_delete(struct otx_ep_instr_queue *iq, uint32_t idx)
368 struct otx_ep_buf_free_info *finfo;
370 buf = iq->req_list[idx].buf;
371 reqtype = iq->req_list[idx].reqtype;
/* Single-segment packet: buf is the mbuf itself. */
374 case OTX_EP_REQTYPE_NORESP_NET:
375 rte_pktmbuf_free((struct rte_mbuf *)buf);
376 otx_ep_dbg("IQ buffer freed at idx[%d]\n", idx);
/* Scatter-gather packet: buf is a free-info struct owning the mbuf
 * chain and the separately allocated SG entry array.
 */
379 case OTX_EP_REQTYPE_NORESP_GATHER:
380 finfo = (struct otx_ep_buf_free_info *)buf;
381 /* This will take care of multiple segments also */
382 rte_pktmbuf_free(finfo->mbuf);
383 rte_free(finfo->g.sg);
387 case OTX_EP_REQTYPE_NONE:
389 otx_ep_info("This iqreq mode is not supported:%d\n", reqtype);
392 /* Reset the request list at this index */
393 iq->req_list[idx].buf = NULL;
394 iq->req_list[idx].reqtype = 0;
/* Record the buffer and request type for IQ slot @index so it can be
 * freed later by otx_ep_iqreq_delete() when the command completes.
 */
398 otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
399 uint32_t reqtype, int index)
401 iq->req_list[index].buf = buf;
402 iq->req_list[index].reqtype = reqtype;
/* Read the hardware instruction-count register and convert it to a
 * ring index. Masking by (nb_desc - 1) relies on nb_desc being a
 * power of two (enforced at IQ init).
 */
406 otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
408 uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
/* 0xFFFFFFFF: counter saturated/wrapped — write it back to reset.
 * NOTE(review): the (elided) lines presumably re-read the register
 * after the reset write; confirm.
 */
409 if (unlikely(new_idx == 0xFFFFFFFFU))
410 rte_write32(new_idx, iq->inst_cnt_reg);
411 /* Modulo of the new index with the IQ size will give us
414 new_idx &= (iq->nb_desc - 1);
/* Reclaim completed IQ slots: advance flush_index up to the hardware
 * read index, freeing each slot's buffer, and update the pending
 * instruction count.
 */
420 otx_ep_flush_iq(struct otx_ep_instr_queue *iq)
422 uint32_t instr_processed = 0;
424 iq->otx_read_index = otx_vf_update_read_index(iq);
425 while (iq->flush_index != iq->otx_read_index) {
426 /* Free the IQ data buffer to the pool */
427 otx_ep_iqreq_delete(iq, iq->flush_index);
429 otx_ep_incr_index(iq->flush_index, 1, iq->nb_desc);
434 iq->stats.instr_processed = instr_processed;
435 iq->instr_pending -= instr_processed;
/* Ring the IQ doorbell: tell hardware how many new commands were
 * filled since the last ring (iq->fill_cnt).
 */
439 otx_ep_ring_doorbell(struct otx_ep_device *otx_ep __rte_unused,
440 struct otx_ep_instr_queue *iq)
443 rte_write64(iq->fill_cnt, iq->doorbell_reg);
/* Copy one command into the next free IQ slot.
 * Returns OTX_EP_IQ_SEND_FAILED when the queue is full (one slot is
 * always kept free so write index cannot lap read index), otherwise
 * success after advancing host_write_index.
 */
448 post_iqcmd(struct otx_ep_instr_queue *iq, uint8_t *iqcmd)
450 uint8_t *iqptr, cmdsize;
452 /* This ensures that the read index does not wrap around to
453 * the same position if queue gets full before OCTEON 9 could
456 if (iq->instr_pending > (iq->nb_desc - 1))
457 return OTX_EP_IQ_SEND_FAILED;
459 /* Copy cmd into iq */
/* Each slot is 64 bytes, hence the << 6. */
461 iqptr = iq->base_addr + (iq->host_write_index << 6);
463 rte_memcpy(iqptr, iqcmd, cmdsize);
465 /* Increment the host write index */
466 iq->host_write_index =
467 otx_ep_incr_index(iq->host_write_index, 1, iq->nb_desc);
471 /* Flush the command into memory. We need to be sure the data
472 * is in memory before indicating that the instruction is
476 /* OTX_EP_IQ_SEND_SUCCESS */
/* Submit one command to the IQ and, when @dbell is set (or on
 * failure, to flush whatever was already queued), ring the doorbell.
 * Updates posted/dropped stats accordingly.
 */
482 otx_ep_send_data(struct otx_ep_device *otx_ep, struct otx_ep_instr_queue *iq,
483 void *cmd, int dbell)
487 /* Submit IQ command */
488 ret = post_iqcmd(iq, cmd);
490 if (ret == OTX_EP_IQ_SEND_SUCCESS) {
492 otx_ep_ring_doorbell(otx_ep, iq);
493 iq->stats.instr_posted++;
/* Failure path: count the drop and still ring the doorbell so the
 * previously posted commands are not stranded.
 */
496 iq->stats.instr_dropped++;
498 otx_ep_ring_doorbell(otx_ep, iq);
/* Store a segment length into position @pos of an SG entry's size
 * array. The array layout is big-endian in hardware, so on
 * little-endian hosts the index is mirrored (3 - pos).
 */
504 set_sg_size(struct otx_ep_sg_entry *sg_entry, uint16_t size, uint32_t pos)
506 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
507 sg_entry->u.size[pos] = size;
508 #elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
509 sg_entry->u.size[3 - pos] = size;
513 /* Enqueue requests/packets to OTX_EP IQ queue.
514 * returns number of requests enqueued successfully
/* Tx burst handler for OCTEON 9 (otx) devices.
 * Builds a 64B instruction per mbuf — direct pointer for
 * single-segment packets, a scatter-gather list for multi-segment —
 * posts it to the IQ, and rings the doorbell on the last packet of
 * the burst. Returns the number of packets enqueued.
 */
517 otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
519 struct otx_ep_instr_64B iqcmd;
520 struct otx_ep_instr_queue *iq;
521 struct otx_ep_device *otx_ep;
524 uint32_t iqreq_type, sgbuf_sz;
525 int dbell, index, count = 0;
526 unsigned int pkt_len, i;
531 iq = (struct otx_ep_instr_queue *)tx_queue;
532 otx_ep = iq->otx_ep_dev;
/* Build the burst-invariant parts of the command once. */
535 iqcmd.pki_ih3.u64 = 0;
539 iqcmd.ih.s.fsz = OTX_EP_FSZ;
540 iqcmd.ih.s.pkind = otx_ep->pkind; /* The SDK decided PKIND value */
543 iqcmd.pki_ih3.s.w = 1;
544 iqcmd.pki_ih3.s.utt = 1;
545 iqcmd.pki_ih3.s.tagtype = ORDERED_TAG;
546 /* sl will be sizeof(pki_ih3) */
547 iqcmd.pki_ih3.s.sl = OTX_EP_FSZ + OTX_CUST_DATA_LEN;
550 iqcmd.irh.s.opcode = OTX_EP_NW_PKT_OP;
552 for (i = 0; i < nb_pkts; i++) {
/* Fast path: single segment — DMA straight from the mbuf. */
554 if (m->nb_segs == 1) {
556 dptr = rte_mbuf_data_iova(m);
557 pkt_len = rte_pktmbuf_data_len(m);
559 iqreq_type = OTX_EP_REQTYPE_NORESP_NET;
/* Multi-segment path: build a gather list (4 pointers per SG entry). */
563 struct otx_ep_buf_free_info *finfo;
564 int j, frags, num_sg;
/* Multi-seg requires the offload to have been negotiated. */
566 if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
569 finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
572 otx_ep_err("free buffer alloc failed\n");
575 num_sg = (m->nb_segs + 3) / 4;
576 sgbuf_sz = sizeof(struct otx_ep_sg_entry) * num_sg;
578 rte_zmalloc(NULL, sgbuf_sz, OTX_EP_SG_ALIGN);
579 if (finfo->g.sg == NULL) {
581 otx_ep_err("sg entry alloc failed\n");
/* First segment fills slot 0; remaining frags fill j = 1..frags. */
586 finfo->g.num_sg = num_sg;
587 finfo->g.sg[0].ptr[0] = rte_mbuf_data_iova(m);
588 set_sg_size(&finfo->g.sg[0], m->data_len, 0);
589 pkt_len = m->data_len;
592 frags = m->nb_segs - 1;
596 finfo->g.sg[(j >> 2)].ptr[(j & 3)] =
597 rte_mbuf_data_iova(m);
598 set_sg_size(&finfo->g.sg[(j >> 2)],
599 m->data_len, (j & 3));
600 pkt_len += m->data_len;
/* DMA pointer targets the gather list, not the packet data. */
604 dptr = rte_mem_virt2iova(finfo->g.sg);
606 iqreq_type = OTX_EP_REQTYPE_NORESP_GATHER;
607 if (pkt_len > OTX_EP_MAX_PKT_SZ) {
608 rte_free(finfo->g.sg);
610 otx_ep_err("failed\n");
/* Per-packet command fields: total length, gather flag/size. */
615 iqcmd.ih.s.tlen = pkt_len + iqcmd.ih.s.fsz;
616 iqcmd.ih.s.gather = gather;
617 iqcmd.ih.s.gsz = gsz;
620 otx_ep_swap_8B_data(&iqcmd.irh.u64, 1);
622 #ifdef OTX_EP_IO_DEBUG
623 otx_ep_dbg("After swapping\n");
624 otx_ep_dbg("Word0 [dptr]: 0x%016lx\n",
625 (unsigned long)iqcmd.dptr);
626 otx_ep_dbg("Word1 [ihtx]: 0x%016lx\n", (unsigned long)iqcmd.ih);
627 otx_ep_dbg("Word2 [pki_ih3]: 0x%016lx\n",
628 (unsigned long)iqcmd.pki_ih3);
629 otx_ep_dbg("Word3 [rptr]: 0x%016lx\n",
630 (unsigned long)iqcmd.rptr);
631 otx_ep_dbg("Word4 [irh]: 0x%016lx\n", (unsigned long)iqcmd.irh);
632 otx_ep_dbg("Word5 [exhdr[0]]: 0x%016lx\n",
633 (unsigned long)iqcmd.exhdr[0]);
634 rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
/* Ring the doorbell only on the last packet of the burst. */
636 dbell = (i == (unsigned int)(nb_pkts - 1)) ? 1 : 0;
637 index = iq->host_write_index;
638 if (otx_ep_send_data(otx_ep, iq, &iqcmd, dbell))
/* Record the buffer for later free once hardware consumes the slot. */
640 otx_ep_iqreq_add(iq, iqreq_buf, iqreq_type, index);
642 iq->stats.tx_bytes += pkt_len;
/* Backpressure: reclaim completed slots when too many are pending. */
647 if (iq->instr_pending >= OTX_EP_MAX_INSTR)
650 /* Return no# of instructions posted successfully. */
654 /* Enqueue requests/packets to OTX_EP IQ queue.
655 * returns number of requests enqueued successfully
/* Tx burst handler for OCTEON TX2 (otx2) devices.
 * Mirrors otx_ep_xmit_pkts() but builds an otx2-format 64B command
 * (no pki_ih3 word). Returns the number of packets enqueued.
 */
658 otx2_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
660 struct otx2_ep_instr_64B iqcmd2;
661 struct otx_ep_instr_queue *iq;
662 struct otx_ep_device *otx_ep;
667 unsigned int pkt_len;
669 uint32_t iqreq_type, sgbuf_sz;
674 iq = (struct otx_ep_instr_queue *)tx_queue;
675 otx_ep = iq->otx_ep_dev;
/* Burst-invariant command fields. */
681 iqcmd2.ih.s.fsz = OTX2_EP_FSZ;
682 iqcmd2.ih.s.pkind = otx_ep->pkind; /* The SDK decided PKIND value */
684 iqcmd2.irh.s.opcode = OTX_EP_NW_PKT_OP;
686 for (i = 0; i < nb_pkts; i++) {
/* Fast path: single segment — DMA straight from the mbuf. */
688 if (m->nb_segs == 1) {
690 dptr = rte_mbuf_data_iova(m);
691 pkt_len = rte_pktmbuf_data_len(m);
693 iqreq_type = OTX_EP_REQTYPE_NORESP_NET;
/* Multi-segment path: build a gather list (4 pointers per SG entry). */
697 struct otx_ep_buf_free_info *finfo;
698 int j, frags, num_sg;
700 if (!(otx_ep->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
703 finfo = (struct otx_ep_buf_free_info *)
704 rte_malloc(NULL, sizeof(*finfo), 0);
706 otx_ep_err("free buffer alloc failed\n");
709 num_sg = (m->nb_segs + 3) / 4;
710 sgbuf_sz = sizeof(struct otx_ep_sg_entry) * num_sg;
712 rte_zmalloc(NULL, sgbuf_sz, OTX_EP_SG_ALIGN);
713 if (finfo->g.sg == NULL) {
715 otx_ep_err("sg entry alloc failed\n");
720 finfo->g.num_sg = num_sg;
721 finfo->g.sg[0].ptr[0] = rte_mbuf_data_iova(m);
722 set_sg_size(&finfo->g.sg[0], m->data_len, 0);
723 pkt_len = m->data_len;
726 frags = m->nb_segs - 1;
730 finfo->g.sg[(j >> 2)].ptr[(j & 3)] =
731 rte_mbuf_data_iova(m);
732 set_sg_size(&finfo->g.sg[(j >> 2)],
733 m->data_len, (j & 3));
734 pkt_len += m->data_len;
/* DMA pointer targets the gather list, not the packet data. */
738 dptr = rte_mem_virt2iova(finfo->g.sg);
740 iqreq_type = OTX_EP_REQTYPE_NORESP_GATHER;
741 if (pkt_len > OTX_EP_MAX_PKT_SZ) {
742 rte_free(finfo->g.sg);
744 otx_ep_err("failed\n");
749 iqcmd2.ih.s.tlen = pkt_len + iqcmd2.ih.s.fsz;
750 iqcmd2.ih.s.gather = gather;
751 iqcmd2.ih.s.gsz = gsz;
753 otx_ep_swap_8B_data(&iqcmd2.irh.u64, 1);
755 #ifdef OTX_EP_IO_DEBUG
/* BUG(review): these debug prints reference "iqcmd", but this
 * function's command variable is "iqcmd2" — copy-paste from
 * otx_ep_xmit_pkts(). This block would fail to compile when
 * OTX_EP_IO_DEBUG is defined; the fields should be iqcmd2.*.
 */
756 otx_ep_dbg("After swapping\n");
757 otx_ep_dbg("Word0 [dptr]: 0x%016lx\n",
758 (unsigned long)iqcmd.dptr);
759 otx_ep_dbg("Word1 [ihtx]: 0x%016lx\n", (unsigned long)iqcmd.ih);
760 otx_ep_dbg("Word2 [pki_ih3]: 0x%016lx\n",
761 (unsigned long)iqcmd.pki_ih3);
762 otx_ep_dbg("Word3 [rptr]: 0x%016lx\n",
763 (unsigned long)iqcmd.rptr);
764 otx_ep_dbg("Word4 [irh]: 0x%016lx\n", (unsigned long)iqcmd.irh);
765 otx_ep_dbg("Word5 [exhdr[0]]: 0x%016lx\n",
766 (unsigned long)iqcmd.exhdr[0]);
/* Ring the doorbell only on the last packet of the burst. */
768 index = iq->host_write_index;
769 dbell = (i == (unsigned int)(nb_pkts - 1)) ? 1 : 0;
770 if (otx_ep_send_data(otx_ep, iq, &iqcmd2, dbell))
772 otx_ep_iqreq_add(iq, iqreq_buf, iqreq_type, index);
774 iq->stats.tx_bytes += pkt_len;
/* Backpressure: reclaim completed slots when too many are pending. */
779 if (iq->instr_pending >= OTX_EP_MAX_INSTR)
782 /* Return no# of instructions posted successfully. */
/* Repost consumed DROQ descriptors: for each slot counted in
 * refill_count, reuse the existing mbuf if one is still attached,
 * otherwise allocate a fresh one and program its IOVA. Returns the
 * number of descriptors refilled (to be written to the credit reg).
 */
787 otx_ep_droq_refill(struct otx_ep_droq *droq)
789 struct otx_ep_droq_desc *desc_ring;
790 struct otx_ep_droq_info *info;
791 struct rte_mbuf *buf = NULL;
792 uint32_t desc_refilled = 0;
794 desc_ring = droq->desc_ring;
796 while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
797 /* If a valid buffer exists (happens if there is no dispatch),
798 * reuse the buffer, else allocate.
800 if (droq->recv_buf_list[droq->refill_idx] != NULL)
803 buf = rte_pktmbuf_alloc(droq->mpool);
804 /* If a buffer could not be allocated, no point in
808 droq->stats.rx_alloc_failure++;
/* Clear the info header so a stale length is never read as valid. */
811 info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
812 memset(info, 0, sizeof(*info));
814 droq->recv_buf_list[droq->refill_idx] = buf;
815 desc_ring[droq->refill_idx].buffer_ptr =
816 rte_mbuf_data_iova_default(buf);
819 droq->refill_idx = otx_ep_incr_index(droq->refill_idx, 1,
823 droq->refill_count--;
826 return desc_refilled;
/* Extract one received packet from the DROQ at read_idx.
 * Polls briefly for the hardware-written info header, handles both
 * single-buffer and multi-buffer (chained) packets, prefetches the
 * next slot's header, fills in mbuf metadata (len, port, ptype) and
 * bumps refill_count for every consumed descriptor. Returns the
 * packet (possibly a chain), or NULL on a stuck/zero-length slot or
 * when scatter is needed but not enabled.
 */
829 static struct rte_mbuf *
830 otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
831 struct otx_ep_droq *droq, int next_fetch)
833 volatile struct otx_ep_droq_info *info;
834 struct rte_mbuf *droq_pkt2 = NULL;
835 struct rte_mbuf *droq_pkt = NULL;
836 struct rte_net_hdr_lens hdr_lens;
837 struct otx_ep_droq_info *info2;
838 uint64_t total_pkt_len;
839 uint32_t pkt_len = 0;
842 droq_pkt = droq->recv_buf_list[droq->read_idx];
843 droq_pkt2 = droq->recv_buf_list[droq->read_idx];
844 info = rte_pktmbuf_mtod(droq_pkt, struct otx_ep_droq_info *);
845 /* make sure info is available */
/* Hardware may lag the credit counter: spin a bounded number of
 * retries waiting for the DMA'd length to become non-zero.
 */
847 if (unlikely(!info->length)) {
848 int retry = OTX_EP_MAX_DELAYED_PKT_RETRIES;
849 /* otx_ep_dbg("OCTEON DROQ[%d]: read_idx: %d; Data not ready "
850 * "yet, Retry; pending=%lu\n", droq->q_no, droq->read_idx,
851 * droq->pkts_pending);
853 droq->stats.pkts_delayed_data++;
854 while (retry && !info->length)
/* Retries exhausted: treat as a zero-length packet and drop it. */
856 if (!retry && !info->length) {
857 otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
858 droq->q_no, droq->read_idx);
859 /* May be zero length packet; drop it */
860 rte_pktmbuf_free(droq_pkt);
861 droq->recv_buf_list[droq->read_idx] = NULL;
862 droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
864 droq->stats.dropped_zlp++;
865 droq->refill_count++;
/* Prefetch the next slot's info header when more packets follow. */
870 next_idx = otx_ep_incr_index(droq->read_idx, 1, droq->nb_desc);
871 droq_pkt2 = droq->recv_buf_list[next_idx];
872 info2 = rte_pktmbuf_mtod(droq_pkt2, struct otx_ep_droq_info *);
873 rte_prefetch_non_temporal((const void *)info2);
/* Length arrives big-endian from hardware. */
876 info->length = rte_bswap64(info->length);
877 /* Deduce the actual data size */
878 total_pkt_len = info->length + INFO_SIZE;
/* Whole packet fits in one buffer: strip the info header via
 * data_off and hand back a single mbuf.
 */
879 if (total_pkt_len <= droq->buffer_size) {
880 info->length -= OTX_EP_RH_SIZE;
881 droq_pkt = droq->recv_buf_list[droq->read_idx];
882 if (likely(droq_pkt != NULL)) {
883 droq_pkt->data_off += OTX_EP_DROQ_INFO_SIZE;
884 /* otx_ep_dbg("OQ: pkt_len[%ld], buffer_size %d\n",
885 * (long)info->length, droq->buffer_size);
887 pkt_len = (uint32_t)info->length;
888 droq_pkt->pkt_len = pkt_len;
889 droq_pkt->data_len = pkt_len;
890 droq_pkt->port = otx_ep->port_id;
891 droq->recv_buf_list[droq->read_idx] = NULL;
892 droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
894 droq->refill_count++;
/* Packet spans multiple ring buffers: chain them into one mbuf. */
897 struct rte_mbuf *first_buf = NULL;
898 struct rte_mbuf *last_buf = NULL;
900 while (pkt_len < total_pkt_len) {
/* Copy length for this segment: full buffer or the remainder. */
903 cpy_len = ((pkt_len + droq->buffer_size) >
905 ? ((uint32_t)total_pkt_len -
909 droq_pkt = droq->recv_buf_list[droq->read_idx];
910 droq->recv_buf_list[droq->read_idx] = NULL;
912 if (likely(droq_pkt != NULL)) {
913 /* Note the first seg */
915 first_buf = droq_pkt;
917 droq_pkt->port = otx_ep->port_id;
/* Only the first segment carries the info header to strip. */
919 droq_pkt->data_off +=
920 OTX_EP_DROQ_INFO_SIZE;
922 cpy_len - OTX_EP_DROQ_INFO_SIZE;
924 cpy_len - OTX_EP_DROQ_INFO_SIZE;
926 droq_pkt->pkt_len = cpy_len;
927 droq_pkt->data_len = cpy_len;
/* Link subsequent segments onto the head mbuf. */
931 first_buf->nb_segs++;
932 first_buf->pkt_len += droq_pkt->pkt_len;
936 last_buf->next = droq_pkt;
940 otx_ep_err("no buf\n");
944 droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
946 droq->refill_count++;
948 droq_pkt = first_buf;
/* Classify the packet so applications get ptype/header lengths. */
950 droq_pkt->packet_type = rte_net_get_ptype(droq_pkt, &hdr_lens,
952 droq_pkt->l2_len = hdr_lens.l2_len;
953 droq_pkt->l3_len = hdr_lens.l3_len;
954 droq_pkt->l4_len = hdr_lens.l4_len;
/* Multi-seg result without RX_OFFLOAD_SCATTER negotiated: drop. */
956 if (droq_pkt->nb_segs > 1 &&
957 !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
958 rte_pktmbuf_free(droq_pkt);
/* Read-and-clear the DROQ packets-sent register: writing the value
 * back acknowledges it, so each call returns only newly arrived
 * packets, which are accumulated into droq->pkts_pending.
 */
968 static inline uint32_t
969 otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
971 volatile uint64_t pkt_count;
974 /* Latest available OQ packets */
975 pkt_count = rte_read32(droq->pkts_sent_reg);
976 rte_write32(pkt_count, droq->pkts_sent_reg);
977 new_pkts = pkt_count;
978 droq->pkts_pending += new_pkts;
982 /* Check for response arrival from OCTEON 9
983 * returns number of requests completed
/* Rx burst handler: deliver up to @budget packets from the DROQ into
 * @rx_pkts. Refreshes the pending count from hardware as needed,
 * reads each packet, updates stats, and refills/credits descriptors
 * once the refill threshold is crossed.
 */
986 otx_ep_recv_pkts(void *rx_queue,
987 struct rte_mbuf **rx_pkts,
990 struct otx_ep_droq *droq = rx_queue;
991 struct otx_ep_device *otx_ep;
992 struct rte_mbuf *oq_pkt;
995 uint32_t new_pkts = 0;
998 otx_ep = droq->otx_ep_dev;
/* Enough already pending for the whole budget; otherwise poll the
 * hardware counter for fresh arrivals and cap at the budget.
 */
1000 if (droq->pkts_pending > budget) {
1003 new_pkts = droq->pkts_pending;
1004 new_pkts += otx_ep_check_droq_pkts(droq);
1005 if (new_pkts > budget)
1010 goto update_credit; /* No pkts at this moment */
1012 for (pkts = 0; pkts < new_pkts; pkts++) {
1013 /* Push the received pkt to application */
/* Hint read_packet to prefetch the next slot except on the last one. */
1014 next_fetch = (pkts == new_pkts - 1) ? 0 : 1;
1015 oq_pkt = otx_ep_droq_read_packet(otx_ep, droq, next_fetch);
1017 RTE_LOG_DP(ERR, PMD,
1018 "DROQ read pkt failed pending %" PRIu64
1019 "last_pkt_count %" PRIu64 "new_pkts %d.\n",
1020 droq->pkts_pending, droq->last_pkt_count,
/* Abort the burst on a failed read; account what was consumed. */
1022 droq->pkts_pending -= pkts;
1023 droq->stats.rx_err++;
1026 rx_pkts[pkts] = oq_pkt;
1028 droq->stats.pkts_received++;
1029 droq->stats.bytes_received += oq_pkt->pkt_len;
1031 droq->pkts_pending -= pkts;
1033 /* Refill DROQ buffers */
1035 if (droq->refill_count >= DROQ_REFILL_THRESHOLD) {
1036 int desc_refilled = otx_ep_droq_refill(droq);
1038 /* Flush the droq descriptor data to memory to be sure
1039 * that when we update the credits the data in memory is
/* Return credits to hardware for the refilled descriptors. */
1043 rte_write32(desc_refilled, droq->pkts_credit_reg);
1046 * SDP output goes into DROP state when output doorbell count
1047 * goes below drop count. When door bell count is written with
1048 * a value greater than drop count SDP output should come out
1049 * of DROP state. Due to a race condition this is not happening.
1050 * Writing doorbell register with 0 again may make SDP output
1051 * come out of this state.
1054 rte_write32(0, droq->pkts_credit_reg);