/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
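
/*
 * Service the Tx completion ring: walk completions whose color bit matches
 * cq->done_color, record the last comp_index seen, then free the mbuf
 * chains for the descriptors consumed up to that point in a single pass.
 */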
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but freeing segment by segment is faster.
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx desc and wait for its
	 * completion before disabling the Tx queue.
	 */

	ionic_qcq_disable(&txq->qcq);

	ionic_tx_flush(txq);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}
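
/*
 * Minimal application-side sketch of how the offloads above are requested
 * (illustrative only, the queue id and descriptor count are assumptions):
 * the bits passed in rte_eth_txconf/txmode are what populate txq->flags
 * in this function.
 *
 *	struct rte_eth_txconf txconf = {
 *		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
 *			    DEV_TX_OFFLOAD_TCP_CKSUM,
 *	};
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *		&txconf);
 */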

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&txq->qcq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
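
/*
 * Pseudo-header checksum helpers used by the TSO path below. The inner-most
 * TCP checksum field is seeded with the IP pseudo-header sum computed with
 * the IP length set to zero, so the hardware can fold the per-segment
 * length into each TCP segment produced by the TSO.
 */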
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	void **info;
	uint8_t flags = 0;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);
		info[0] = txm;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
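
/*
 * Segment a PKT_TX_TCP_SEG mbuf chain into TSO descriptors. The first
 * descriptor of the frame is flagged SOT and carries the headers (hdrlen)
 * plus up to mss bytes of payload; subsequent descriptors carry mss bytes
 * each, with extra mbuf segments spilling into S/G elements; the descriptor
 * covering the final bytes is flagged EOT.
 */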
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */
	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */
	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;
			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
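
/*
 * Post a single non-TSO frame: the first segment goes in the main Tx
 * descriptor and any remaining segments go in the scatter-gather elements,
 * with checksum/VLAN/encap offloads encoded in the descriptor flags.
 */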
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	info[0] = txm;

	elem = sg_desc_base[q->head_idx].elems;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}
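
/*
 * Burst transmit entry point. Reclaims completed buffers, clamps the burst
 * to the free descriptor count, posts each mbuf via the TSO or non-TSO
 * path, then rings the doorbell once for the whole batch.
 */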
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx]);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}
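
/*
 * Minimal application-side sketch (illustrative only): run bursts through
 * rte_eth_tx_prepare() so frames with unsupported offloads or too many
 * segments are rejected here with rte_errno set, instead of being handed
 * to the hardware.
 *
 *	uint16_t n_ok = rte_eth_tx_prepare(port_id, 0, pkts, n);
 *	uint16_t n_tx = rte_eth_tx_burst(port_id, 0, pkts, n_ok);
 */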

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *mbuf;
	void **info;

	while (q->tail_idx != q->head_idx) {
		info = IONIC_INFO_PTR(q, q->tail_idx);
		mbuf = info[0];
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_stats *stats;

	if (!rxq)
		return;

	stats = &rxq->stats;

	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	ionic_rx_empty(rxq);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC; also account for ETHER_CRC_LEN once the
	 * adapter can keep the CRC, and subtract it from the length of all
	 * received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		DEV_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
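
/*
 * Turn one Rx completion into a finished mbuf: trim segment lengths to the
 * completion length, translate the hardware RSS/VLAN/checksum flags into
 * ol_flags, derive the packet type, and append the mbuf to the caller's
 * receive array. Error cases recycle the buffer back onto the ring.
 */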
static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *service_cb_arg)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm, *rxm_seg;
	uint32_t max_frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = &rxq->stats;
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;
	void **info;

	assert(q_desc_index == cq_desc->comp_index);

	info = IONIC_INFO_PTR(q, cq_desc->comp_index);

	rxm = info[0];

	if (!recv_args) {
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
			cq_desc->len == 0) {
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->qcq.lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			stats->mtods++;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	q->info[q->head_idx] = mbuf;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	ionic_q_flush(q);
}
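
/*
 * Post receive buffers into every free slot on the ring. Each slot gets
 * enough mempool buffers to cover one maximum-sized frame:
 *	nsegs = (len + buf_size - 1) / buf_size
 * e.g. a 9018-byte frame with buf_size = 2048 needs nsegs = 5: one buffer
 * on the main descriptor plus four on scatter-gather elements.
 */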
static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_rxq_sg_elem *elem;
	void **info;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		info = IONIC_INFO_PTR(q, q->head_idx);

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;

			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		info[0] = rxm;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->qcq.q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&rxq->qcq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
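
/*
 * Walk the Rx completion ring, bounded by work_to_do, and clean every
 * descriptor index covered by each completion. The color bit in the
 * completion flips each time the ring wraps, which is how newly written
 * completions are distinguished from stale ones.
 */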
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
				service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(&rxq->qcq);

	/* Flush */
	ionic_rxq_service(rxq, -1, NULL);

	return 0;
}
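
/*
 * Minimal application-side receive loop sketch (illustrative only);
 * ionic_recv_pkts below is the burst function rte_eth_rx_burst()
 * dispatches to for this PMD. process_pkt() stands in for the
 * application's own handler.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	for (i = 0; i < nb; i++)
 *		process_pkt(pkts[i]);
 */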
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	uint32_t frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}