/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

#define IONIC_RX_RING_DOORBELL_STRIDE		(32 - 1)
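
/*
 * Used as a mask on (head_idx + 1): ring sizes are powers of two, so
 * the Rx fill path rings the doorbell once every 32 posted descriptors
 * rather than on every post.
 */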

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->deferred_start;
}
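
/*
 * Completion descriptors carry a color bit that flips on each ring
 * wrap (the driver mirrors this in cq->done_color), so color_match()
 * identifies entries that have been newly written. ionic_tx_flush()
 * walks the new completions and then frees every transmitted mbuf
 * chain up to the last comp_index seen.
 */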

static inline void __attribute__((cold))
ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			q_desc_info = &q->info[q->tail_idx];

			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but freeing segment by segment is faster.
			 */
			txm = q_desc_info->cb_arg;
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __attribute__((cold))
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;

	IONIC_PRINT_CALL();

	ionic_qcq_free(txq);
}

int __attribute__((cold))
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_qcq *txq;

	IONIC_PRINT_CALL();

	txq = eth_dev->data->tx_queues[tx_queue_id];

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue.
	 */
	ionic_qcq_disable(txq);

	ionic_tx_flush(&txq->cq);

	ionic_lif_txq_deinit(txq);

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

int __attribute__((cold))
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *txq;
	uint64_t offloads;
	int err;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
		tx_queue_id, nb_desc);

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	txq->deferred_start = tx_conf->tx_deferred_start;

	txq->offloads = offloads;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __attribute__((cold))
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_qcq *txq;
	int err;

	IONIC_PRINT_CALL();

	txq = eth_dev->data->tx_queues[tx_queue_id];

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	ionic_qcq_enable(txq);

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
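
/*
 * Post a single TSO descriptor. The SOT/EOT flags mark the first and
 * last descriptors of the TSO; the mbuf is attached only to the final
 * descriptor, so the chain is freed exactly once upon completion.
 */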
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	ionic_q_post(q, done, NULL, done ? txm : NULL);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
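
/*
 * Segment a TSO mbuf into descriptors. The first loop below chops the
 * head mbuf's data into (hdrlen + mss)-sized payloads; the second
 * walks the chained segments, packing them into SG elements and
 * starting a new descriptor whenever a full MSS of payload has been
 * gathered.
 */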
static int
ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads __rte_unused, bool not_xmit_more)
{
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	uint64_t desc_addr = 0;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;

	hdrlen = txm->l2_len + txm->l3_len;

	seglen = hdrlen + mss;
	left = txm->data_len;

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		/* Advance past the data already consumed from this mbuf */
		desc_addr = rte_cpu_to_le_64(
			rte_mbuf_data_iova_default(txm) + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		left = txm_seg->data_len;
		stats->frags++;

		while (left > 0) {
			rte_iova_t data_iova;
			data_iova = rte_mbuf_data_iova(txm_seg);
			if (frag_left > 0) {
				/* Finish the open descriptor with an SG elem */
				elem->addr =
					rte_cpu_to_le_64(data_iova + offset);
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				/* Start a new descriptor at current offset */
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr =
					rte_cpu_to_le_64(data_iova + offset);
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;
			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc = ionic_tx_tso_next(q, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
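
/*
 * Post a non-TSO packet: one main descriptor for the first mbuf plus
 * one SG element per chained segment.
 */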
static int
ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads __rte_unused, bool not_xmit_more)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct rte_mbuf *txm_seg;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		stats->frags++;
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, NULL, txm);

	return 0;
}
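
/*
 * Burst transmit. Completions are reaped up front to reclaim
 * descriptors; the doorbell is deferred until the last packet of the
 * burst via the 'last' flag passed down to ionic_q_post().
 */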
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
	struct ionic_queue *q = &txq->q;
	struct ionic_cq *cq = &txq->cq;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Cleaning old buffers */
	ionic_tx_flush(cq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
				last);
		else
			err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
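
/*
 * rte_eth_tx_prepare() callback: reject packets that exceed the SG
 * element limit or that request an offload outside
 * IONIC_TX_OFFLOAD_MASK.
 */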
uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->deferred_start;
	qinfo->conf.offloads = rxq->offloads;
}

static void __attribute__((cold))
ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	struct ionic_desc_info *cur;
	struct rte_mbuf *mbuf;

	while (q->tail_idx != q->head_idx) {
		cur = &q->info[q->tail_idx];
		mbuf = cur->cb_arg;
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
	}
}

void __attribute__((cold))
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;

	IONIC_PRINT_CALL();

	ionic_rx_empty(&rxq->q);

	ionic_qcq_free(rxq);
}

int __attribute__((cold))
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *rxq;
	uint64_t offloads;
	int err;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers",
		rx_queue_id, nb_desc);

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad number of descriptors (%u) for queue %u (min: %u, max: %u)",
			nb_desc, rx_queue_id,
			IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue allocation failure");
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC; when the adapter is able to keep the
	 * CRC, also subtract ETHER_CRC_LEN from the length of all
	 * received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		DEV_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	rxq->deferred_start = rx_conf->rx_deferred_start;

	rxq->offloads = offloads;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
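
/*
 * Service one received packet: validate the completion, fix up the
 * mbuf chain lengths, then translate the completion's checksum flags
 * and packet type into mbuf fields. Undeliverable buffers are
 * recycled back onto the ring.
 */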
static void
ionic_rx_clean(struct ionic_queue *q,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *cb_arg, void *service_cb_arg)
{
	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm = cb_arg;
	struct rte_mbuf *rxm_seg;
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	uint32_t max_frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put() is faster when the mbuf has no
		 * chained segments:
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
			cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
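
/*
 * Repost a dropped buffer at the current ring head by copying its old
 * descriptor; the attached mbuf is reused as-is.
 */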
static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
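
/*
 * Fill the Rx ring. Each descriptor receives one mbuf; when the
 * requested frame size exceeds the mempool buffer size, extra mbufs
 * are chained through the SG descriptor. The doorbell is rung once
 * per IONIC_RX_RING_DOORBELL_STRIDE descriptors rather than per post.
 */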
static int __attribute__((cold))
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->q;
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_rxq_desc *desc;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%u < %u)",
				size, len);

		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
	}

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __attribute__((cold))
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_qcq *rxq;
	int err;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)",
		frame_size);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	ionic_qcq_enable(rxq);

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
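
/*
 * Walk the completion ring by color, cleaning every queue descriptor
 * covered by each completion (a scatter-gather completion can cover
 * several), until the ring runs dry or work_to_do completions have
 * been handled.
 */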
static inline void __attribute__((cold))
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			q_desc_info = &q->info[q->tail_idx];

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
				q_desc_info->cb_arg, service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __attribute__((cold))
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_qcq *rxq;

	IONIC_PRINT_CALL();

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	ionic_qcq_disable(rxq);

	/* Flush */
	ionic_rxq_service(&rxq->cq, -1, NULL);

	ionic_lif_rxq_deinit(rxq);

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
	uint32_t frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_cq *cq = &rxq->cq;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}