1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
28 #include "iavf_rxtx.h"
31 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
33 /* The following constraints must be satisfied:
34 * thresh < rxq->nb_rx_desc
36 if (thresh >= nb_desc) {
37 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
45 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
46 uint16_t tx_free_thresh)
48 /* TX descriptors will have their RS bit set after tx_rs_thresh
49 * descriptors have been used. The TX descriptor ring will be cleaned
50 * after tx_free_thresh descriptors are used or if the number of
51 * descriptors required to transmit a packet is greater than the
52 * number of free TX descriptors.
54 * The following constraints must be satisfied:
55 * - tx_rs_thresh must be less than the size of the ring minus 2.
56 * - tx_free_thresh must be less than the size of the ring minus 3.
57 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
58 * - tx_rs_thresh must be a divisor of the ring size.
60 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
61  * race condition, hence the maximum threshold constraints. When a
62  * threshold is set to zero, the default value is used.
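 * Example (illustrative values): nb_desc = 1024 with tx_rs_thresh = 32 and
 * tx_free_thresh = 32 satisfies all four constraints
 * (32 < 1022, 32 < 1021, 32 <= 32, 1024 % 32 == 0).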
64 if (tx_rs_thresh >= (nb_desc - 2)) {
65 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
66 "number of TX descriptors (%u) minus 2",
67 tx_rs_thresh, nb_desc);
70 if (tx_free_thresh >= (nb_desc - 3)) {
71 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
72 "number of TX descriptors (%u) minus 3.",
73 tx_free_thresh, nb_desc);
76 if (tx_rs_thresh > tx_free_thresh) {
77 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
78 "equal to tx_free_thresh (%u).",
79 tx_rs_thresh, tx_free_thresh);
82 if ((nb_desc % tx_rs_thresh) != 0) {
83 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
84 "number of TX descriptors (%u).",
85 tx_rs_thresh, nb_desc);
93 check_rx_vec_allow(struct iavf_rx_queue *rxq)
95 if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
96 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
97 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
101 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
106 check_tx_vec_allow(struct iavf_tx_queue *txq)
108 if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
109 txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
110 txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
111 PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
114 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
119 check_rx_bulk_allow(struct iavf_rx_queue *rxq)
123 if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
124 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
125 "rxq->rx_free_thresh=%d, "
126 "IAVF_RX_MAX_BURST=%d",
127 rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
129 } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
130 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
131 "rxq->nb_rx_desc=%d, "
132 "rxq->rx_free_thresh=%d",
133 rxq->nb_rx_desc, rxq->rx_free_thresh);
140 reset_rx_queue(struct iavf_rx_queue *rxq)
148 len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
150 for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
151 ((volatile char *)rxq->rx_ring)[i] = 0;
153 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
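	/* The extra IAVF_RX_MAX_BURST entries past the real ring point at the
	 * fake mbuf, so look-ahead receive paths always read a valid pointer.
	 */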
155 for (i = 0; i < IAVF_RX_MAX_BURST; i++)
156 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
159 rxq->rx_nb_avail = 0;
160 rxq->rx_next_avail = 0;
161 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
165 rxq->pkt_first_seg = NULL;
166 rxq->pkt_last_seg = NULL;
170 reset_tx_queue(struct iavf_tx_queue *txq)
172 struct iavf_tx_entry *txe;
177 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
182 size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
183 for (i = 0; i < size; i++)
184 ((volatile char *)txq->tx_ring)[i] = 0;
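	/* Mark every descriptor as completed and link the SW ring entries
	 * into a circular list.
	 */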
186 prev = (uint16_t)(txq->nb_tx_desc - 1);
187 for (i = 0; i < txq->nb_tx_desc; i++) {
188 txq->tx_ring[i].cmd_type_offset_bsz =
189 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
192 txe[prev].next_id = i;
199 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
200 txq->nb_free = txq->nb_tx_desc - 1;
202 txq->next_dd = txq->rs_thresh - 1;
203 txq->next_rs = txq->rs_thresh - 1;
207 alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
209 volatile union iavf_rx_desc *rxd;
210 struct rte_mbuf *mbuf = NULL;
214 for (i = 0; i < rxq->nb_rx_desc; i++) {
215 mbuf = rte_mbuf_raw_alloc(rxq->mp);
216 if (unlikely(!mbuf)) {
217 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
221 rte_mbuf_refcnt_set(mbuf, 1);
223 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
225 mbuf->port = rxq->port_id;
228 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
230 rxd = &rxq->rx_ring[i];
231 rxd->read.pkt_addr = dma_addr;
232 rxd->read.hdr_addr = 0;
233 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
238 rxq->sw_ring[i] = mbuf;
245 release_rxq_mbufs(struct iavf_rx_queue *rxq)
252 for (i = 0; i < rxq->nb_rx_desc; i++) {
253 if (rxq->sw_ring[i]) {
254 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
255 rxq->sw_ring[i] = NULL;
260 if (rxq->rx_nb_avail == 0)
262 for (i = 0; i < rxq->rx_nb_avail; i++) {
263 struct rte_mbuf *mbuf;
265 mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
266 rte_pktmbuf_free_seg(mbuf);
268 rxq->rx_nb_avail = 0;
272 release_txq_mbufs(struct iavf_tx_queue *txq)
276 if (!txq || !txq->sw_ring) {
277 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
281 for (i = 0; i < txq->nb_tx_desc; i++) {
282 if (txq->sw_ring[i].mbuf) {
283 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
284 txq->sw_ring[i].mbuf = NULL;
289 static const struct iavf_rxq_ops def_rxq_ops = {
290 .release_mbufs = release_rxq_mbufs,
293 static const struct iavf_txq_ops def_txq_ops = {
294 .release_mbufs = release_txq_mbufs,
298 iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
299 uint16_t nb_desc, unsigned int socket_id,
300 const struct rte_eth_rxconf *rx_conf,
301 struct rte_mempool *mp)
303 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
304 struct iavf_adapter *ad =
305 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
306 struct iavf_rx_queue *rxq;
307 const struct rte_memzone *mz;
310 uint16_t rx_free_thresh;
312 PMD_INIT_FUNC_TRACE();
314 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
315 nb_desc > IAVF_MAX_RING_DESC ||
316 nb_desc < IAVF_MIN_RING_DESC) {
317 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
322 /* Check free threshold */
323 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
324 IAVF_DEFAULT_RX_FREE_THRESH :
325 rx_conf->rx_free_thresh;
326 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
329 /* Free memory if needed */
330 if (dev->data->rx_queues[queue_idx]) {
331 iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
332 dev->data->rx_queues[queue_idx] = NULL;
335 /* Allocate the rx queue data structure */
336 rxq = rte_zmalloc_socket("iavf rxq",
337 sizeof(struct iavf_rx_queue),
341 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
342 "rx queue data structure");
347 rxq->nb_rx_desc = nb_desc;
348 rxq->rx_free_thresh = rx_free_thresh;
349 rxq->queue_id = queue_idx;
350 rxq->port_id = dev->data->port_id;
351 rxq->crc_len = 0; /* crc stripping by default */
352 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
355 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
356 rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
358 /* Allocate the software ring. */
359 len = nb_desc + IAVF_RX_MAX_BURST;
361 rte_zmalloc_socket("iavf rx sw ring",
362 sizeof(struct rte_mbuf *) * len,
366 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
371 	/* Allocate the maximum number of RX ring hardware descriptors with
372 	 * a little more to support bulk allocation.
374 len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
375 ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
377 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
378 ring_size, IAVF_RING_BASE_ALIGN,
381 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
382 rte_free(rxq->sw_ring);
386 /* Zero all the descriptors in the ring. */
387 memset(mz->addr, 0, ring_size);
388 rxq->rx_ring_phys_addr = mz->iova;
389 rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
394 dev->data->rx_queues[queue_idx] = rxq;
395 rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
396 rxq->ops = &def_rxq_ops;
398 if (check_rx_bulk_allow(rxq) == TRUE) {
399 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
400 "satisfied. Rx Burst Bulk Alloc function will be "
401 "used on port=%d, queue=%d.",
402 rxq->port_id, rxq->queue_id);
404 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
405 "not satisfied, Scattered Rx is requested "
406 "on port=%d, queue=%d.",
407 rxq->port_id, rxq->queue_id);
408 ad->rx_bulk_alloc_allowed = false;
411 if (check_rx_vec_allow(rxq) == FALSE)
412 ad->rx_vec_allowed = false;
418 iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
421 unsigned int socket_id,
422 const struct rte_eth_txconf *tx_conf)
424 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
425 struct iavf_tx_queue *txq;
426 const struct rte_memzone *mz;
428 uint16_t tx_rs_thresh, tx_free_thresh;
431 PMD_INIT_FUNC_TRACE();
433 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
435 if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
436 nb_desc > IAVF_MAX_RING_DESC ||
437 nb_desc < IAVF_MIN_RING_DESC) {
438 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
443 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
444 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
445 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
446 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
447 	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
449 /* Free memory if needed. */
450 if (dev->data->tx_queues[queue_idx]) {
451 iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
452 dev->data->tx_queues[queue_idx] = NULL;
455 /* Allocate the TX queue data structure. */
456 txq = rte_zmalloc_socket("iavf txq",
457 sizeof(struct iavf_tx_queue),
461 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
462 "tx queue structure");
466 txq->nb_tx_desc = nb_desc;
467 txq->rs_thresh = tx_rs_thresh;
468 txq->free_thresh = tx_free_thresh;
469 txq->queue_id = queue_idx;
470 txq->port_id = dev->data->port_id;
471 txq->offloads = offloads;
472 txq->tx_deferred_start = tx_conf->tx_deferred_start;
474 /* Allocate software ring */
476 rte_zmalloc_socket("iavf tx sw ring",
477 sizeof(struct iavf_tx_entry) * nb_desc,
481 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
486 /* Allocate TX hardware ring descriptors. */
487 ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
488 ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
489 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
490 ring_size, IAVF_RING_BASE_ALIGN,
493 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
494 rte_free(txq->sw_ring);
498 txq->tx_ring_phys_addr = mz->iova;
499 txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
504 dev->data->tx_queues[queue_idx] = txq;
505 txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
506 txq->ops = &def_txq_ops;
508 if (check_tx_vec_allow(txq) == FALSE) {
509 struct iavf_adapter *ad =
510 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
511 ad->tx_vec_allowed = false;
518 iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
520 struct iavf_adapter *adapter =
521 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
522 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
523 struct iavf_rx_queue *rxq;
526 PMD_DRV_FUNC_TRACE();
528 if (rx_queue_id >= dev->data->nb_rx_queues)
531 rxq = dev->data->rx_queues[rx_queue_id];
533 err = alloc_rxq_mbufs(rxq);
535 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
541 /* Init the RX tail register. */
542 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
543 IAVF_WRITE_FLUSH(hw);
545 /* Ready to switch the queue on */
546 err = iavf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
548 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
551 dev->data->rx_queue_state[rx_queue_id] =
552 RTE_ETH_QUEUE_STATE_STARTED;
558 iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
560 struct iavf_adapter *adapter =
561 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
562 struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
563 struct iavf_tx_queue *txq;
566 PMD_DRV_FUNC_TRACE();
568 if (tx_queue_id >= dev->data->nb_tx_queues)
571 txq = dev->data->tx_queues[tx_queue_id];
573 	/* Init the TX tail register. */
574 IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
575 IAVF_WRITE_FLUSH(hw);
577 /* Ready to switch the queue on */
578 err = iavf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
581 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
584 dev->data->tx_queue_state[tx_queue_id] =
585 RTE_ETH_QUEUE_STATE_STARTED;
591 iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
593 struct iavf_adapter *adapter =
594 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
595 struct iavf_rx_queue *rxq;
598 PMD_DRV_FUNC_TRACE();
600 if (rx_queue_id >= dev->data->nb_rx_queues)
603 err = iavf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
605 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
610 rxq = dev->data->rx_queues[rx_queue_id];
611 rxq->ops->release_mbufs(rxq);
613 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
619 iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
621 struct iavf_adapter *adapter =
622 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
623 struct iavf_tx_queue *txq;
626 PMD_DRV_FUNC_TRACE();
628 if (tx_queue_id >= dev->data->nb_tx_queues)
631 err = iavf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
633 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
638 txq = dev->data->tx_queues[tx_queue_id];
639 txq->ops->release_mbufs(txq);
641 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
647 iavf_dev_rx_queue_release(void *rxq)
649 struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
654 q->ops->release_mbufs(q);
655 rte_free(q->sw_ring);
656 rte_memzone_free(q->mz);
661 iavf_dev_tx_queue_release(void *txq)
663 struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
668 q->ops->release_mbufs(q);
669 rte_free(q->sw_ring);
670 rte_memzone_free(q->mz);
675 iavf_stop_queues(struct rte_eth_dev *dev)
677 struct iavf_adapter *adapter =
678 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
679 struct iavf_rx_queue *rxq;
680 struct iavf_tx_queue *txq;
683 	/* Stop all queues */
684 ret = iavf_disable_queues(adapter);
686 		PMD_DRV_LOG(WARNING, "Failed to stop queues");
688 for (i = 0; i < dev->data->nb_tx_queues; i++) {
689 txq = dev->data->tx_queues[i];
692 txq->ops->release_mbufs(txq);
694 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
696 for (i = 0; i < dev->data->nb_rx_queues; i++) {
697 rxq = dev->data->rx_queues[i];
700 rxq->ops->release_mbufs(rxq);
702 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
707 iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
709 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
710 (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
711 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
713 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
719 /* Translate the rx descriptor status and error fields to pkt flags */
720 static inline uint64_t
721 iavf_rxd_to_pkt_flags(uint64_t qword)
724 uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
726 #define IAVF_RX_ERR_BITS 0x3f
728 /* Check if RSS_HASH */
729 flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
730 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
731 IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
733 if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
734 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
738 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
739 flags |= PKT_RX_IP_CKSUM_BAD;
741 flags |= PKT_RX_IP_CKSUM_GOOD;
743 if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
744 flags |= PKT_RX_L4_CKSUM_BAD;
746 flags |= PKT_RX_L4_CKSUM_GOOD;
748 /* TODO: Oversize error bit is not processed here */
753 /* implement recv_pkts */
755 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
757 volatile union iavf_rx_desc *rx_ring;
758 volatile union iavf_rx_desc *rxdp;
759 struct iavf_rx_queue *rxq;
760 union iavf_rx_desc rxd;
761 struct rte_mbuf *rxe;
762 struct rte_eth_dev *dev;
763 struct rte_mbuf *rxm;
764 struct rte_mbuf *nmb;
768 uint16_t rx_packet_len;
769 uint16_t rx_id, nb_hold;
772 static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
774 [1] = RTE_PTYPE_L2_ETHER,
775 /* [2] - [21] reserved */
776 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
778 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
779 RTE_PTYPE_L4_NONFRAG,
780 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
783 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
785 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
787 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
789 /* All others reserved */
795 rx_id = rxq->rx_tail;
796 rx_ring = rxq->rx_ring;
798 while (nb_rx < nb_pkts) {
799 rxdp = &rx_ring[rx_id];
800 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
801 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
802 IAVF_RXD_QW1_STATUS_SHIFT;
804 /* Check the DD bit first */
805 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
807 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
809 nmb = rte_mbuf_raw_alloc(rxq->mp);
810 if (unlikely(!nmb)) {
811 dev = &rte_eth_devices[rxq->port_id];
812 dev->data->rx_mbuf_alloc_failed++;
813 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
814 "queue_id=%u", rxq->port_id, rxq->queue_id);
820 rxe = rxq->sw_ring[rx_id];
822 if (unlikely(rx_id == rxq->nb_rx_desc))
825 /* Prefetch next mbuf */
826 rte_prefetch0(rxq->sw_ring[rx_id]);
828 		/* When the next RX descriptor is on a cache line boundary,
829 * prefetch the next 4 RX descriptors and next 8 pointers
832 if ((rx_id & 0x3) == 0) {
833 rte_prefetch0(&rx_ring[rx_id]);
834 rte_prefetch0(rxq->sw_ring[rx_id]);
839 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
840 rxdp->read.hdr_addr = 0;
841 rxdp->read.pkt_addr = dma_addr;
843 rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
844 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
846 rxm->data_off = RTE_PKTMBUF_HEADROOM;
847 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
850 rxm->pkt_len = rx_packet_len;
851 rxm->data_len = rx_packet_len;
852 rxm->port = rxq->port_id;
854 iavf_rxd_to_vlan_tci(rxm, &rxd);
855 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
857 ptype_tbl[(uint8_t)((qword1 &
858 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
860 if (pkt_flags & PKT_RX_RSS_HASH)
862 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
864 rxm->ol_flags |= pkt_flags;
866 rx_pkts[nb_rx++] = rxm;
868 rxq->rx_tail = rx_id;
870 /* If the number of free RX descriptors is greater than the RX free
871 * threshold of the queue, advance the receive tail register of queue.
872 * Update that register with the value of the last processed RX
873 * descriptor minus 1.
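	 * This guarantees that the tail (RDT) never equals the head (RDH),
	 * which hardware would treat as a full ring.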
875 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
876 if (nb_hold > rxq->rx_free_thresh) {
877 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
878 "nb_hold=%u nb_rx=%u",
879 rxq->port_id, rxq->queue_id,
880 rx_id, nb_hold, nb_rx);
881 rx_id = (uint16_t)((rx_id == 0) ?
882 (rxq->nb_rx_desc - 1) : (rx_id - 1));
883 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
886 rxq->nb_rx_hold = nb_hold;
891 /* implement recv_scattered_pkts */
893 iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
896 struct iavf_rx_queue *rxq = rx_queue;
897 union iavf_rx_desc rxd;
898 struct rte_mbuf *rxe;
899 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
900 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
901 struct rte_mbuf *nmb, *rxm;
902 uint16_t rx_id = rxq->rx_tail;
903 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
904 struct rte_eth_dev *dev;
910 volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
911 volatile union iavf_rx_desc *rxdp;
912 static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
914 [1] = RTE_PTYPE_L2_ETHER,
915 /* [2] - [21] reserved */
916 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
918 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
919 RTE_PTYPE_L4_NONFRAG,
920 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
923 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
925 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
927 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
929 /* All others reserved */
932 while (nb_rx < nb_pkts) {
933 rxdp = &rx_ring[rx_id];
934 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
935 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
936 IAVF_RXD_QW1_STATUS_SHIFT;
938 /* Check the DD bit */
939 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
941 IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
943 nmb = rte_mbuf_raw_alloc(rxq->mp);
944 if (unlikely(!nmb)) {
945 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
946 "queue_id=%u", rxq->port_id, rxq->queue_id);
947 dev = &rte_eth_devices[rxq->port_id];
948 dev->data->rx_mbuf_alloc_failed++;
954 rxe = rxq->sw_ring[rx_id];
956 if (rx_id == rxq->nb_rx_desc)
959 /* Prefetch next mbuf */
960 rte_prefetch0(rxq->sw_ring[rx_id]);
962 		/* When the next RX descriptor is on a cache line boundary,
963 * prefetch the next 4 RX descriptors and next 8 pointers
966 if ((rx_id & 0x3) == 0) {
967 rte_prefetch0(&rx_ring[rx_id]);
968 rte_prefetch0(rxq->sw_ring[rx_id]);
974 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
976 /* Set data buffer address and data length of the mbuf */
977 rxdp->read.hdr_addr = 0;
978 rxdp->read.pkt_addr = dma_addr;
979 rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
980 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
981 rxm->data_len = rx_packet_len;
982 rxm->data_off = RTE_PKTMBUF_HEADROOM;
984 /* If this is the first buffer of the received packet, set the
985 * pointer to the first mbuf of the packet and initialize its
986 * context. Otherwise, update the total length and the number
987 * of segments of the current scattered packet, and update the
988 * pointer to the last mbuf of the current packet.
992 first_seg->nb_segs = 1;
993 first_seg->pkt_len = rx_packet_len;
996 (uint16_t)(first_seg->pkt_len +
998 first_seg->nb_segs++;
999 last_seg->next = rxm;
1002 /* If this is not the last buffer of the received packet,
1003 * update the pointer to the last mbuf of the current scattered
1004 * packet and continue to parse the RX ring.
1006 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
1011 /* This is the last buffer of the received packet. If the CRC
1012 * is not stripped by the hardware:
1013 * - Subtract the CRC length from the total packet length.
1014 * - If the last buffer only contains the whole CRC or a part
1015 * of it, free the mbuf associated to the last buffer. If part
1016 * of the CRC is also contained in the previous mbuf, subtract
1017 * the length of that CRC part from the data length of the
1021 if (unlikely(rxq->crc_len > 0)) {
1022 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1023 if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1024 rte_pktmbuf_free_seg(rxm);
1025 first_seg->nb_segs--;
1026 last_seg->data_len =
1027 (uint16_t)(last_seg->data_len -
1028 (RTE_ETHER_CRC_LEN - rx_packet_len));
1029 last_seg->next = NULL;
1031 rxm->data_len = (uint16_t)(rx_packet_len -
1035 first_seg->port = rxq->port_id;
1036 first_seg->ol_flags = 0;
1037 iavf_rxd_to_vlan_tci(first_seg, &rxd);
1038 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1039 first_seg->packet_type =
1040 ptype_tbl[(uint8_t)((qword1 &
1041 IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
1043 if (pkt_flags & PKT_RX_RSS_HASH)
1044 first_seg->hash.rss =
1045 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1047 first_seg->ol_flags |= pkt_flags;
1049 /* Prefetch data of first segment, if configured to do so. */
1050 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1051 first_seg->data_off));
1052 rx_pkts[nb_rx++] = first_seg;
1056 /* Record index of the next RX descriptor to probe. */
1057 rxq->rx_tail = rx_id;
1058 rxq->pkt_first_seg = first_seg;
1059 rxq->pkt_last_seg = last_seg;
1061 /* If the number of free RX descriptors is greater than the RX free
1062 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1063 * register. Update the RDT with the value of the last processed RX
1064 * descriptor minus 1, to guarantee that the RDT register is never
1065  * equal to the RDH register, which creates a "full" ring situation
1066 * from the hardware point of view.
1068 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1069 if (nb_hold > rxq->rx_free_thresh) {
1070 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1071 "nb_hold=%u nb_rx=%u",
1072 rxq->port_id, rxq->queue_id,
1073 rx_id, nb_hold, nb_rx);
1074 rx_id = (uint16_t)(rx_id == 0 ?
1075 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1076 IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1079 rxq->nb_rx_hold = nb_hold;
1084 #define IAVF_LOOK_AHEAD 8
1086 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
1088 volatile union iavf_rx_desc *rxdp;
1089 struct rte_mbuf **rxep;
1090 struct rte_mbuf *mb;
1094 int32_t s[IAVF_LOOK_AHEAD], nb_dd;
1095 int32_t i, j, nb_rx = 0;
1097 static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
1099 [1] = RTE_PTYPE_L2_ETHER,
1100 /* [2] - [21] reserved */
1101 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1103 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1104 RTE_PTYPE_L4_NONFRAG,
1105 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1108 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1110 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1112 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
1114 /* All others reserved */
1117 rxdp = &rxq->rx_ring[rxq->rx_tail];
1118 rxep = &rxq->sw_ring[rxq->rx_tail];
1120 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1121 rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1122 IAVF_RXD_QW1_STATUS_SHIFT;
1124 /* Make sure there is at least 1 packet to receive */
1125 if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
1128 /* Scan LOOK_AHEAD descriptors at a time to determine which
1129 * descriptors reference packets that are ready to be received.
1131 for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
1132 rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
1133 /* Read desc statuses backwards to avoid race condition */
1134 for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
1135 qword1 = rte_le_to_cpu_64(
1136 rxdp[j].wb.qword1.status_error_len);
1137 s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
1138 IAVF_RXD_QW1_STATUS_SHIFT;
1143 /* Compute how many status bits were set */
1144 for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
1145 nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
1149 /* Translate descriptor info to mbuf parameters */
1150 for (j = 0; j < nb_dd; j++) {
1151 IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
1152 rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
1155 qword1 = rte_le_to_cpu_64
1156 (rxdp[j].wb.qword1.status_error_len);
1157 pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1158 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
1159 mb->data_len = pkt_len;
1160 mb->pkt_len = pkt_len;
1162 iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
1163 pkt_flags = iavf_rxd_to_pkt_flags(qword1);
1165 ptype_tbl[(uint8_t)((qword1 &
1166 IAVF_RXD_QW1_PTYPE_MASK) >>
1167 IAVF_RXD_QW1_PTYPE_SHIFT)];
1169 if (pkt_flags & PKT_RX_RSS_HASH)
1170 mb->hash.rss = rte_le_to_cpu_32(
1171 rxdp[j].wb.qword0.hi_dword.rss);
1173 mb->ol_flags |= pkt_flags;
1176 for (j = 0; j < IAVF_LOOK_AHEAD; j++)
1177 rxq->rx_stage[i + j] = rxep[j];
1179 if (nb_dd != IAVF_LOOK_AHEAD)
1183 /* Clear software ring entries */
1184 for (i = 0; i < nb_rx; i++)
1185 rxq->sw_ring[rxq->rx_tail + i] = NULL;
1190 static inline uint16_t
1191 iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
1192 struct rte_mbuf **rx_pkts,
1196 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1198 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1200 for (i = 0; i < nb_pkts; i++)
1201 rx_pkts[i] = stage[i];
1203 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1204 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1210 iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
1212 volatile union iavf_rx_desc *rxdp;
1213 struct rte_mbuf **rxep;
1214 struct rte_mbuf *mb;
1215 uint16_t alloc_idx, i;
1219 /* Allocate buffers in bulk */
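	/* alloc_idx is the first of the rx_free_thresh ring slots that end
	 * at rx_free_trigger.
	 */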
1220 alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1221 (rxq->rx_free_thresh - 1));
1222 rxep = &rxq->sw_ring[alloc_idx];
1223 diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1224 rxq->rx_free_thresh);
1225 if (unlikely(diag != 0)) {
1226 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1230 rxdp = &rxq->rx_ring[alloc_idx];
1231 for (i = 0; i < rxq->rx_free_thresh; i++) {
1232 if (likely(i < (rxq->rx_free_thresh - 1)))
1233 /* Prefetch next mbuf */
1234 rte_prefetch0(rxep[i + 1]);
1237 rte_mbuf_refcnt_set(mb, 1);
1239 mb->data_off = RTE_PKTMBUF_HEADROOM;
1241 mb->port = rxq->port_id;
1242 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1243 rxdp[i].read.hdr_addr = 0;
1244 rxdp[i].read.pkt_addr = dma_addr;
1247 /* Update rx tail register */
1249 IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
1251 rxq->rx_free_trigger =
1252 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1253 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1254 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
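/* Bulk-alloc Rx burst: scan the hardware ring into the rx_stage array, then
 * serve callers from the staged mbufs until the stage is drained.
 */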
1259 static inline uint16_t
1260 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1262 struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
1268 if (rxq->rx_nb_avail)
1269 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1271 nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
1272 rxq->rx_next_avail = 0;
1273 rxq->rx_nb_avail = nb_rx;
1274 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1276 if (rxq->rx_tail > rxq->rx_free_trigger) {
1277 if (iavf_rx_alloc_bufs(rxq) != 0) {
1280 /* TODO: count rx_mbuf_alloc_failed here */
1282 rxq->rx_nb_avail = 0;
1283 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1284 for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1285 rxq->sw_ring[j] = rxq->rx_stage[i];
1291 if (rxq->rx_tail >= rxq->nb_rx_desc)
1294 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
1295 rxq->port_id, rxq->queue_id,
1296 rxq->rx_tail, nb_rx);
1298 if (rxq->rx_nb_avail)
1299 return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1305 iavf_recv_pkts_bulk_alloc(void *rx_queue,
1306 struct rte_mbuf **rx_pkts,
1309 uint16_t nb_rx = 0, n, count;
1311 if (unlikely(nb_pkts == 0))
1314 if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
1315 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
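	/* Larger requests are handled in chunks of at most IAVF_RX_MAX_BURST. */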
1318 n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
1319 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1320 nb_rx = (uint16_t)(nb_rx + count);
1321 nb_pkts = (uint16_t)(nb_pkts - count);
1330 iavf_xmit_cleanup(struct iavf_tx_queue *txq)
1332 struct iavf_tx_entry *sw_ring = txq->sw_ring;
1333 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1334 uint16_t nb_tx_desc = txq->nb_tx_desc;
1335 uint16_t desc_to_clean_to;
1336 uint16_t nb_tx_to_clean;
1338 volatile struct iavf_tx_desc *txd = txq->tx_ring;
1340 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1341 if (desc_to_clean_to >= nb_tx_desc)
1342 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1344 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1345 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1346 rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
1347 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
1348 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1349 "(port=%d queue=%d)", desc_to_clean_to,
1350 txq->port_id, txq->queue_id);
1354 if (last_desc_cleaned > desc_to_clean_to)
1355 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1358 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1361 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1363 txq->last_desc_cleaned = desc_to_clean_to;
1364 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1369 /* Check if the context descriptor is needed for TX offloading */
1370 static inline uint16_t
1371 iavf_calc_context_desc(uint64_t flags)
1373 static uint64_t mask = PKT_TX_TCP_SEG;
1375 return (flags & mask) ? 1 : 0;
1379 iavf_txd_enable_checksum(uint64_t ol_flags,
1381 uint32_t *td_offset,
1382 union iavf_tx_offload tx_offload)
1385 *td_offset |= (tx_offload.l2_len >> 1) <<
1386 IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1388 /* Enable L3 checksum offloads */
1389 if (ol_flags & PKT_TX_IP_CKSUM) {
1390 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1391 *td_offset |= (tx_offload.l3_len >> 2) <<
1392 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1393 } else if (ol_flags & PKT_TX_IPV4) {
1394 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
1395 *td_offset |= (tx_offload.l3_len >> 2) <<
1396 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1397 } else if (ol_flags & PKT_TX_IPV6) {
1398 *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1399 *td_offset |= (tx_offload.l3_len >> 2) <<
1400 IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1403 if (ol_flags & PKT_TX_TCP_SEG) {
1404 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1405 *td_offset |= (tx_offload.l4_len >> 2) <<
1406 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1410 /* Enable L4 checksum offloads */
1411 switch (ol_flags & PKT_TX_L4_MASK) {
1412 case PKT_TX_TCP_CKSUM:
1413 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1414 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
1415 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1417 case PKT_TX_SCTP_CKSUM:
1418 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1419 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
1420 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1422 case PKT_TX_UDP_CKSUM:
1423 *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1424 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
1425 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1432 /* set TSO context descriptor
1433 * support IP -> L4 and IP -> IP -> L4
1435 static inline uint64_t
1436 iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
1438 uint64_t ctx_desc = 0;
1439 uint32_t cd_cmd, hdr_len, cd_tso_len;
1441 if (!tx_offload.l4_len) {
1442 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1446 hdr_len = tx_offload.l2_len +
1450 cd_cmd = IAVF_TX_CTX_DESC_TSO;
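	/* cd_tso_len is the TSO payload size: the total packet length minus
	 * the header bytes accumulated in hdr_len.
	 */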
1451 cd_tso_len = mbuf->pkt_len - hdr_len;
1452 ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1453 ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1454 ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1459 /* Construct the tx flags */
1460 static inline uint64_t
1461 iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1464 return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
1465 ((uint64_t)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
1466 ((uint64_t)td_offset <<
1467 IAVF_TXD_QW1_OFFSET_SHIFT) |
1469 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1470 ((uint64_t)td_tag <<
1471 IAVF_TXD_QW1_L2TAG1_SHIFT));
1476 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1478 volatile struct iavf_tx_desc *txd;
1479 volatile struct iavf_tx_desc *txr;
1480 struct iavf_tx_queue *txq;
1481 struct iavf_tx_entry *sw_ring;
1482 struct iavf_tx_entry *txe, *txn;
1483 struct rte_mbuf *tx_pkt;
1484 struct rte_mbuf *m_seg;
1495 uint64_t buf_dma_addr;
1496 union iavf_tx_offload tx_offload = {0};
1499 sw_ring = txq->sw_ring;
1501 tx_id = txq->tx_tail;
1502 txe = &sw_ring[tx_id];
1504 /* Check if the descriptor ring needs to be cleaned. */
1505 if (txq->nb_free < txq->free_thresh)
1506 iavf_xmit_cleanup(txq);
1508 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1513 tx_pkt = *tx_pkts++;
1514 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1516 ol_flags = tx_pkt->ol_flags;
1517 tx_offload.l2_len = tx_pkt->l2_len;
1518 tx_offload.l3_len = tx_pkt->l3_len;
1519 tx_offload.l4_len = tx_pkt->l4_len;
1520 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1522 /* Calculate the number of context descriptors needed. */
1523 nb_ctx = iavf_calc_context_desc(ol_flags);
1525 /* The number of descriptors that must be allocated for
1526 * a packet equals to the number of the segments of that
1527 * packet plus 1 context descriptor if needed.
1529 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1530 tx_last = (uint16_t)(tx_id + nb_used - 1);
1533 if (tx_last >= txq->nb_tx_desc)
1534 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1536 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1537 " tx_first=%u tx_last=%u",
1538 txq->port_id, txq->queue_id, tx_id, tx_last);
1540 if (nb_used > txq->nb_free) {
1541 if (iavf_xmit_cleanup(txq)) {
1546 if (unlikely(nb_used > txq->rs_thresh)) {
1547 while (nb_used > txq->nb_free) {
1548 if (iavf_xmit_cleanup(txq)) {
1557 /* Descriptor based VLAN insertion */
1558 if (ol_flags & PKT_TX_VLAN_PKT) {
1559 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
1560 td_tag = tx_pkt->vlan_tci;
1563 /* According to datasheet, the bit2 is reserved and must be
1568 /* Enable checksum offloading */
1569 if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
1570 iavf_txd_enable_checksum(ol_flags, &td_cmd,
1571 &td_offset, tx_offload);
1574 /* Setup TX context descriptor if required */
1575 uint64_t cd_type_cmd_tso_mss =
1576 IAVF_TX_DESC_DTYPE_CONTEXT;
1577 volatile struct iavf_tx_context_desc *ctx_txd =
1578 (volatile struct iavf_tx_context_desc *)
1581 txn = &sw_ring[txe->next_id];
1582 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1584 rte_pktmbuf_free_seg(txe->mbuf);
1589 if (ol_flags & PKT_TX_TCP_SEG)
1590 cd_type_cmd_tso_mss |=
1591 iavf_set_tso_ctx(tx_pkt, tx_offload);
1593 ctx_txd->type_cmd_tso_mss =
1594 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
1596 IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
1597 txe->last_id = tx_last;
1598 tx_id = txe->next_id;
1605 txn = &sw_ring[txe->next_id];
1608 rte_pktmbuf_free_seg(txe->mbuf);
1611 /* Setup TX Descriptor */
1612 slen = m_seg->data_len;
1613 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1614 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1615 txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
1620 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
1621 txe->last_id = tx_last;
1622 tx_id = txe->next_id;
1624 m_seg = m_seg->next;
1627 /* The last packet data descriptor needs End Of Packet (EOP) */
1628 td_cmd |= IAVF_TX_DESC_CMD_EOP;
1629 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
1630 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
1632 if (txq->nb_used >= txq->rs_thresh) {
1633 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
1634 "%4u (port=%d queue=%d)",
1635 tx_last, txq->port_id, txq->queue_id);
1637 td_cmd |= IAVF_TX_DESC_CMD_RS;
1639 /* Update txq RS bit counters */
1643 txd->cmd_type_offset_bsz |=
1644 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1645 IAVF_TXD_QW1_CMD_SHIFT);
1646 IAVF_DUMP_TX_DESC(txq, txd, tx_id);
1652 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1653 txq->port_id, txq->queue_id, tx_id, nb_tx);
1655 IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
1656 txq->tx_tail = tx_id;
1661 /* TX prep functions */
1663 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1670 for (i = 0; i < nb_pkts; i++) {
1672 ol_flags = m->ol_flags;
1674 /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
1675 if (!(ol_flags & PKT_TX_TCP_SEG)) {
1676 if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
1680 } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
1681 (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
1682 			/* An MSS outside this range is considered malicious */
1687 if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
1688 rte_errno = ENOTSUP;
1692 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1693 ret = rte_validate_tx_offload(m);
1699 ret = rte_net_intel_cksum_prepare(m);
1709 /* choose rx function */
1711 iavf_set_rx_function(struct rte_eth_dev *dev)
1713 struct iavf_adapter *adapter =
1714 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1716 struct iavf_rx_queue *rxq;
1718 bool use_avx2 = false;
1720 if (!iavf_rx_vec_dev_check(dev)) {
1721 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1722 rxq = dev->data->rx_queues[i];
1723 (void)iavf_rxq_vec_setup(rxq);
1726 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
1727 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
1730 if (dev->data->scattered_rx) {
1732 "Using %sVector Scattered Rx (port %d).",
1733 use_avx2 ? "avx2 " : "",
1734 dev->data->port_id);
1735 dev->rx_pkt_burst = use_avx2 ?
1736 iavf_recv_scattered_pkts_vec_avx2 :
1737 iavf_recv_scattered_pkts_vec;
1739 PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
1740 use_avx2 ? "avx2 " : "",
1741 dev->data->port_id);
1742 dev->rx_pkt_burst = use_avx2 ?
1743 iavf_recv_pkts_vec_avx2 :
1751 if (dev->data->scattered_rx) {
1752 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
1753 dev->data->port_id);
1754 dev->rx_pkt_burst = iavf_recv_scattered_pkts;
1755 } else if (adapter->rx_bulk_alloc_allowed) {
1756 PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
1757 dev->data->port_id);
1758 dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
1760 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
1761 dev->data->port_id);
1762 dev->rx_pkt_burst = iavf_recv_pkts;
1766 /* choose tx function */
1768 iavf_set_tx_function(struct rte_eth_dev *dev)
1771 struct iavf_tx_queue *txq;
1773 bool use_avx2 = false;
1775 if (!iavf_tx_vec_dev_check(dev)) {
1776 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1777 txq = dev->data->tx_queues[i];
1780 iavf_txq_vec_setup(txq);
1783 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
1784 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
1787 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
1788 use_avx2 ? "avx2 " : "",
1789 dev->data->port_id);
1790 dev->tx_pkt_burst = use_avx2 ?
1791 iavf_xmit_pkts_vec_avx2 :
1793 dev->tx_pkt_prepare = NULL;
1799 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
1800 dev->data->port_id);
1801 dev->tx_pkt_burst = iavf_xmit_pkts;
1802 dev->tx_pkt_prepare = iavf_prep_pkts;
1806 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1807 struct rte_eth_rxq_info *qinfo)
1809 struct iavf_rx_queue *rxq;
1811 rxq = dev->data->rx_queues[queue_id];
1813 qinfo->mp = rxq->mp;
1814 qinfo->scattered_rx = dev->data->scattered_rx;
1815 qinfo->nb_desc = rxq->nb_rx_desc;
1817 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1818 qinfo->conf.rx_drop_en = TRUE;
1819 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1823 iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1824 struct rte_eth_txq_info *qinfo)
1826 struct iavf_tx_queue *txq;
1828 txq = dev->data->tx_queues[queue_id];
1830 qinfo->nb_desc = txq->nb_tx_desc;
1832 qinfo->conf.tx_free_thresh = txq->free_thresh;
1833 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
1834 qinfo->conf.offloads = txq->offloads;
1835 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1838 /* Get the number of used descriptors of a rx queue */
1840 iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
1842 #define IAVF_RXQ_SCAN_INTERVAL 4
1843 volatile union iavf_rx_desc *rxdp;
1844 struct iavf_rx_queue *rxq;
1847 rxq = dev->data->rx_queues[queue_id];
1848 rxdp = &rxq->rx_ring[rxq->rx_tail];
1849 while ((desc < rxq->nb_rx_desc) &&
1850 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1851 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
1852 (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
1853 		/* Check the DD bit of one rx descriptor in each group of 4,
1854 		 * to avoid checking too frequently and degrading performance
1857 desc += IAVF_RXQ_SCAN_INTERVAL;
1858 rxdp += IAVF_RXQ_SCAN_INTERVAL;
1859 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1860 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1861 desc - rxq->nb_rx_desc]);
1868 iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
1870 struct iavf_rx_queue *rxq = rx_queue;
1871 volatile uint64_t *status;
1875 if (unlikely(offset >= rxq->nb_rx_desc))
1878 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1879 return RTE_ETH_RX_DESC_UNAVAIL;
1881 desc = rxq->rx_tail + offset;
1882 if (desc >= rxq->nb_rx_desc)
1883 desc -= rxq->nb_rx_desc;
1885 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1886 mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
1887 << IAVF_RXD_QW1_STATUS_SHIFT);
1889 return RTE_ETH_RX_DESC_DONE;
1891 return RTE_ETH_RX_DESC_AVAIL;
1895 iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
1897 struct iavf_tx_queue *txq = tx_queue;
1898 volatile uint64_t *status;
1899 uint64_t mask, expect;
1902 if (unlikely(offset >= txq->nb_tx_desc))
1905 desc = txq->tx_tail + offset;
1906 /* go to next desc that has the RS bit */
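	/* i.e. round desc up to the next multiple of rs_thresh */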
1907 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
1909 if (desc >= txq->nb_tx_desc) {
1910 desc -= txq->nb_tx_desc;
1911 if (desc >= txq->nb_tx_desc)
1912 desc -= txq->nb_tx_desc;
1915 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1916 mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
1917 expect = rte_cpu_to_le_64(
1918 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
1919 if ((*status & mask) == expect)
1920 return RTE_ETH_TX_DESC_DONE;
1922 return RTE_ETH_TX_DESC_FULL;