/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <assert.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*********************************************************************
 *
 *  Tx functions
 *
 **********************************************************************/
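
/*
 * Report this Tx queue's configuration back to the application; this
 * backs rte_eth_tx_queue_info_get() for the port.
 */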
void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
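
/*
 * Reclaim completed Tx descriptors: walk the completion ring while the
 * color bit matches the expected done color (the color flips each time
 * the completion index wraps), then free the mbuf chains recorded for
 * all descriptors up to the last completed index.
 */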
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but freeing each segment directly is faster.
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}
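
/* Release all resources for a Tx queue (ethdev .tx_queue_release). */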
void
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(&txq->qcq);
}
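
/*
 * Stop Transmit Units for specified queue.
 */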
int
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue.
	 */
	ionic_qcq_disable(&txq->qcq);

	return 0;
}
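
/*
 * Set up a Tx queue: validate the descriptor count, allocate the
 * queue/completion pair, and translate the configured offloads into
 * per-queue flags used by the transmit path.
 */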
int
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];
	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&txq->qcq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
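
/*
 * Seed the TCP checksum field ahead of TSO; see the "Preload inner-most
 * TCP csum" note in ionic_tx_tso() below for how the hardware uses this
 * value.
 */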
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
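
/*
 * Fill one TSO descriptor and advance the queue head. The first and
 * last descriptors of the burst are tagged with the SOT/EOT flags, and
 * the originating mbuf is recorded with the queue so it can be freed
 * when the completion arrives.
 */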
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	void **info;
	uint8_t flags = 0;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);
		info[0] = txm;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
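
/*
 * Segment a TSO mbuf chain into transmit descriptors. Each descriptor
 * covers up to one MSS worth of payload (plus the headers on the first
 * segment); scatter-gather elements are used when a segment spans mbuf
 * boundaries.
 */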
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */
	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */
	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
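
/*
 * Post a single non-TSO packet: pick the checksum opcode from the mbuf
 * offload flags and the queue's checksum capabilities, then attach any
 * additional mbuf segments as scatter-gather elements.
 */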
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	info[0] = txm;

	elem = sg_desc_base[q->head_idx].elems;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}
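
/*
 * Burst transmit entry point, installed as the ethdev tx_pkt_burst
 * callback. Applications reach it indirectly, e.g.:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * Completed descriptors are reclaimed first, then up to nb_pkts packets
 * are posted to the ring.
 */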
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx]);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  Tx prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
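
/*
 * Transmit prepare entry point (ethdev tx_pkt_prepare callback): checks
 * each packet against the queue's segment limit and the supported Tx
 * offload mask. A typical application-side call:
 *
 *	uint16_t ok = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
 *
 * Only the first 'ok' packets should then be passed to rte_eth_tx_burst().
 */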
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  Rx functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *mbuf;
	void **info;

	while (q->tail_idx != q->head_idx) {
		info = IONIC_INFO_PTR(q, q->tail_idx);
		mbuf = info[0];
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	}
}

void
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
	struct ionic_rx_stats *stats;

	if (!rxq)
		return;

	stats = &rxq->stats;

	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	ionic_rx_empty(rxq);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(&rxq->qcq);
}
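
/*
 * Set up an Rx queue: validate the descriptor count, allocate the
 * queue/completion pair, and attach the mbuf pool that will be used to
 * fill the receive ring.
 */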
int
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC. Once the adapter can keep the CRC,
	 * also account for RTE_ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     DEV_RX_OFFLOAD_KEEP_CRC)
	 *     rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
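
/*
 * Process one received packet: trim the mbuf chain to the completed
 * length, translate the completion's RSS/VLAN/checksum flags into mbuf
 * offload flags and a packet type, and hand the mbuf to the caller's
 * receive array. On error the buffer is recycled back onto the ring.
 */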
static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *service_cb_arg)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm, *rxm_seg;
	uint32_t max_frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = &rxq->stats;
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;
	void **info;

	assert(q_desc_index == cq_desc->comp_index);

	info = IONIC_INFO_PTR(q, cq_desc->comp_index);
	rxm = info[0];

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put() would be faster, but is only
		 * correct for single-segment mbufs:
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
	    cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->qcq.lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;
		rxm_seg = rxm_seg->next;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			stats->mtods++;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
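
/*
 * Return a buffer to the Rx ring by copying its original descriptor
 * into the current head slot, so the hardware can reuse the same mbuf
 * without a fresh allocation.
 */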
static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	q->info[q->head_idx] = mbuf;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	ionic_q_flush(q);
}
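
/*
 * Fill the Rx ring with newly allocated mbufs. When the expected frame
 * length exceeds a single mbuf's data room, extra segments are chained
 * to the head mbuf and described with scatter-gather elements.
 */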
static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_rxq_sg_elem *elem;
	void **info;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		info = IONIC_INFO_PTR(q, q->head_idx);

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		info[0] = rxm;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->qcq.q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&rxq->qcq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
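
/*
 * Service up to work_to_do completions from the Rx completion ring.
 * When called with a NULL service_cb_arg (as done while stopping a
 * queue), completed buffers are freed instead of being delivered.
 */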
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
				service_cb_arg);
		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(&rxq->qcq);

	/* Flush */
	ionic_rxq_service(rxq, -1, NULL);

	return 0;
}
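
/*
 * Burst receive entry point, installed as the ethdev rx_pkt_burst
 * callback, e.g.:
 *
 *	uint16_t got = rte_eth_rx_burst(port_id, queue_id, pkts, n);
 *
 * Completions are serviced first, then the ring is refilled with fresh
 * mbufs before returning the number of packets received.
 */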
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	uint32_t frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}