/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"
#define IONIC_RX_RING_DOORBELL_STRIDE (32 - 1)
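
/*
 * The Rx doorbell is rung once every 32 posted buffers rather than once
 * per buffer, amortizing the cost of the MMIO write. Keeping the stride
 * as a power-of-two mask (32 - 1) lets ionic_rx_fill() detect the stride
 * boundary with a single AND:
 *
 *	ring_doorbell = ((q->head_idx + 1) &
 *		IONIC_RX_RING_DOORBELL_STRIDE) == 0;
 */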
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/
void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
static __rte_always_inline void
ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			q_desc_info = &q->info[q->tail_idx];

			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but freeing segment by segment is faster
			 */
			txm = q_desc_info->cb_arg;
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}
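
/*
 * Illustration of the flush above (hypothetical indices): if q->tail_idx
 * is 5 and the last matching completion reports comp_index == 9, the
 * second loop walks descriptors 5..8 and frees each attached mbuf chain.
 * A single completion can therefore retire several coalesced descriptors.
 */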
void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(txq);
}
int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue
	 */

	ionic_qcq_disable(txq);

	ionic_tx_flush(&txq->cq);

	return 0;
}
int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}
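
/*
 * A minimal setup sketch (application side, not part of the driver);
 * port_id, queue 0 and the 1024-descriptor ring are illustrative:
 *
 *	struct rte_eth_txconf txconf = {
 *		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM,
 *		.tx_deferred_start = 1,
 *	};
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			&txconf) == 0)
 *		rte_eth_dev_tx_queue_start(port_id, 0);
 *
 * With tx_deferred_start set, rte_eth_dev_start() skips the queue; the
 * IONIC_QCQ_F_DEFERRED flag above makes the PMD wait for the explicit
 * rte_eth_dev_tx_queue_start() call instead.
 */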
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(txq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	/* The mbuf chain is freed only when the EOT descriptor completes */
	ionic_q_post(q, done, NULL, done ? txm : NULL);
}
static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
static int
ionic_tx_tso(struct ionic_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->q;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	uint64_t desc_addr = 0;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop data up into desc segments */
	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */
	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		left = txm_seg->data_len;
		stats->frags++;

		while (left > 0) {
			rte_iova_t data_iova;
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->addr = rte_cpu_to_le_64(data_iova) + offset;
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				data_iova = rte_mbuf_data_iova(txm_seg);
				desc_addr = rte_cpu_to_le_64(data_iova);
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;
			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc = ionic_tx_tso_next(q, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
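
/*
 * Worked example with illustrative numbers: a single-segment mbuf with
 * data_len = 4398, hdrlen = 66 (l2 + l3 + l4) and mss = 1448 is chopped
 * into three descriptors: the first carries hdrlen + mss = 1514 bytes
 * and sets TSO_SOT, the second carries 1448 bytes, and the third carries
 * the remaining 1436 bytes and sets TSO_EOT.
 */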
static __rte_always_inline int
ionic_tx(struct ionic_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct rte_mbuf *txm_seg;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, NULL, txm);

	return 0;
}
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
	struct ionic_queue *q = &txq->q;
	struct ionic_cq *cq = &txq->cq;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Clean up buffers from completed transmits */
	ionic_tx_flush(cq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx], last);

		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}
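
/*
 * A minimal transmit-loop sketch (application side); port_id, queue 0
 * and the burst size are illustrative:
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t n, sent;
 *
 *	n = 32;	// assume the array was filled by the application
 *	sent = rte_eth_tx_burst(port_id, 0, burst, n);
 *	// Packets not accepted (e.g. ring full) stay owned by the caller:
 *	while (sent < n)
 *		rte_pktmbuf_free(burst[sent++]);
 */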
/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/
#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
			/* rte_errno carries positive errno values */
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}
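
/*
 * Sketch of the intended call order (illustrative): tx_prepare lets the
 * application reject bursts the hardware cannot handle before posting.
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port_id, 0, burst, n);
 *	if (nb_ok < n) {
 *		// burst[nb_ok] has too many segments or an unsupported
 *		// offload; see rte_errno
 *		handle_bad_pkt(burst[nb_ok]);	// hypothetical helper
 *	}
 *	rte_eth_tx_burst(port_id, 0, burst, nb_ok);
 */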
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);
void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}
static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	struct ionic_desc_info *cur;
	struct rte_mbuf *mbuf;

	while (q->tail_idx != q->head_idx) {
		cur = &q->info[q->tail_idx];
		mbuf = cur->cb_arg;
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
	}
}
void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;

	ionic_rx_empty(&rxq->q);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(rxq);
}
int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC. When the adapter is able to keep the
	 * CRC, also account for ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		DEV_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
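
/*
 * A minimal setup sketch (application side, illustrative names/sizes):
 * the mempool's data room determines buf_size in ionic_rx_fill(), so a
 * larger data room means fewer SG segments per received frame.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *		256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *		NULL, mp);
 */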
static __rte_always_inline void
ionic_rx_clean(struct ionic_queue *q,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *cb_arg, void *service_cb_arg)
{
	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm = cb_arg;
	struct rte_mbuf *rxm_seg;
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	uint32_t max_frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put() is faster when there are no segs:
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
	    cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
			(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	/* Re-post the same buffer at the current head of the ring */
	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
static __rte_always_inline int
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->q;
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_rxq_desc *desc;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
	}

	return 0;
}
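
/*
 * Worked example: with RTE_MBUF_DEFAULT_BUF_SIZE mbufs, buf_size is
 * 2048 bytes (2176 data room - 128 headroom). For max_rx_pkt_len = 9000
 * this gives nsegs = (9000 + 2047) / 2048 = 5: one SG-opcode descriptor
 * holding the first buffer plus four chained segment buffers.
 */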
/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(rxq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
static __rte_always_inline void
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			q_desc_info = &q->info[q->tail_idx];

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
				q_desc_info->cb_arg, service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}
/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(rxq);

	/* Flush */
	ionic_rxq_service(&rxq->cq, -1, NULL);

	return 0;
}
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
	uint32_t frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_cq *cq = &rxq->cq;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}
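
/*
 * A minimal receive-loop sketch (application side); port_id and queue 0
 * are illustrative:
 *
 *	struct rte_mbuf *pkts[64];
 *	uint16_t nb, k;
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, 64);
 *	for (k = 0; k < nb; k++) {
 *		// process pkts[k], then release it
 *		rte_pktmbuf_free(pkts[k]);
 *	}
 *
 * Each burst also refills the ring via ionic_rx_fill() above, drawing
 * fresh mbufs from the queue's mempool as completions are serviced.
 */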