1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
13 #include <sys/queue.h>
15 #include <rte_string_fns.h>
16 #include <rte_memzone.h>
18 #include <rte_malloc.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev.h>
28 #include "base/avf_prototype.h"
29 #include "base/avf_type.h"
34 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
36 /* The following constraints must be satisfied:
37 * thresh < rxq->nb_rx_desc
39 if (thresh >= nb_desc) {
40 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
48 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
49 uint16_t tx_free_thresh)
51 /* TX descriptors will have their RS bit set after tx_rs_thresh
52 * descriptors have been used. The TX descriptor ring will be cleaned
53 * after tx_free_thresh descriptors are used or if the number of
54 * descriptors required to transmit a packet is greater than the
55 * number of free TX descriptors.
57 * The following constraints must be satisfied:
58 * - tx_rs_thresh must be less than the size of the ring minus 2.
59 * - tx_free_thresh must be less than the size of the ring minus 3.
60 * - tx_rs_thresh must be less than or equal to tx_free_thresh.
61 * - tx_rs_thresh must be a divisor of the ring size.
63 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
64 * race condition, hence the maximum threshold constraints. When set
65 * to zero use default values.
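 * Example (illustrative): with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 32 satisfy all of the above, since 32 < 510,
 * 32 < 509, 32 <= 32 and 512 % 32 == 0.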
67 if (tx_rs_thresh >= (nb_desc - 2)) {
68 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
69 "number of TX descriptors (%u) minus 2",
70 tx_rs_thresh, nb_desc);
73 if (tx_free_thresh >= (nb_desc - 3)) {
74 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
75 "number of TX descriptors (%u) minus 3.",
76 tx_free_thresh, nb_desc);
79 if (tx_rs_thresh > tx_free_thresh) {
80 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
81 "equal to tx_free_thresh (%u).",
82 tx_rs_thresh, tx_free_thresh);
85 if ((nb_desc % tx_rs_thresh) != 0) {
86 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
87 "number of TX descriptors (%u).",
88 tx_rs_thresh, nb_desc);
95 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
97 check_rx_vec_allow(struct avf_rx_queue *rxq)
99 if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST &&
100 rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
101 PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
105 PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
110 check_tx_vec_allow(struct avf_tx_queue *txq)
112 if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
113 txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
114 txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
115 PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
118 PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
124 reset_rx_queue(struct avf_rx_queue *rxq)
131 len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;
133 for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
134 ((volatile char *)rxq->rx_ring)[i] = 0;
136 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
138 for (i = 0; i < AVF_RX_MAX_BURST; i++)
139 rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
143 rxq->pkt_first_seg = NULL;
144 rxq->pkt_last_seg = NULL;
148 reset_tx_queue(struct avf_tx_queue *txq)
150 struct avf_tx_entry *txe;
151 uint16_t i, prev, size;
154 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
159 size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
160 for (i = 0; i < size; i++)
161 ((volatile char *)txq->tx_ring)[i] = 0;
163 prev = (uint16_t)(txq->nb_tx_desc - 1);
164 for (i = 0; i < txq->nb_tx_desc; i++) {
165 txq->tx_ring[i].cmd_type_offset_bsz =
166 rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
169 txe[prev].next_id = i;
176 txq->last_desc_cleaned = txq->nb_tx_desc - 1;
177 txq->nb_free = txq->nb_tx_desc - 1;
179 txq->next_dd = txq->rs_thresh - 1;
180 txq->next_rs = txq->rs_thresh - 1;
184 alloc_rxq_mbufs(struct avf_rx_queue *rxq)
186 volatile union avf_rx_desc *rxd;
187 struct rte_mbuf *mbuf = NULL;
191 for (i = 0; i < rxq->nb_rx_desc; i++) {
192 mbuf = rte_mbuf_raw_alloc(rxq->mp);
193 if (unlikely(!mbuf)) {
194 PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
198 rte_mbuf_refcnt_set(mbuf, 1);
200 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
202 mbuf->port = rxq->port_id;
205 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
207 rxd = &rxq->rx_ring[i];
208 rxd->read.pkt_addr = dma_addr;
209 rxd->read.hdr_addr = 0;
210 #ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
215 rxq->sw_ring[i] = mbuf;
222 release_rxq_mbufs(struct avf_rx_queue *rxq)
224 struct rte_mbuf *mbuf;
230 for (i = 0; i < rxq->nb_rx_desc; i++) {
231 if (rxq->sw_ring[i]) {
232 rte_pktmbuf_free_seg(rxq->sw_ring[i]);
233 rxq->sw_ring[i] = NULL;
239 release_txq_mbufs(struct avf_tx_queue *txq)
243 if (!txq || !txq->sw_ring) {
244 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
248 for (i = 0; i < txq->nb_tx_desc; i++) {
249 if (txq->sw_ring[i].mbuf) {
250 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
251 txq->sw_ring[i].mbuf = NULL;
256 static const struct avf_rxq_ops def_rxq_ops = {
257 .release_mbufs = release_rxq_mbufs,
260 static const struct avf_txq_ops def_txq_ops = {
261 .release_mbufs = release_txq_mbufs,
265 avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
266 uint16_t nb_desc, unsigned int socket_id,
267 const struct rte_eth_rxconf *rx_conf,
268 struct rte_mempool *mp)
270 struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
271 struct avf_adapter *ad =
272 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
273 struct avf_rx_queue *rxq;
274 const struct rte_memzone *mz;
277 uint16_t rx_free_thresh;
278 uint16_t base, bsf, tc_mapping;
280 PMD_INIT_FUNC_TRACE();
282 if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
283 nb_desc > AVF_MAX_RING_DESC ||
284 nb_desc < AVF_MIN_RING_DESC) {
285 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
290 /* Check free threshold */
291 rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
292 AVF_DEFAULT_RX_FREE_THRESH :
293 rx_conf->rx_free_thresh;
294 if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
297 /* Free memory if needed */
298 if (dev->data->rx_queues[queue_idx]) {
299 avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
300 dev->data->rx_queues[queue_idx] = NULL;
303 /* Allocate the rx queue data structure */
304 rxq = rte_zmalloc_socket("avf rxq",
305 sizeof(struct avf_rx_queue),
309 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
310 "rx queue data structure");
315 rxq->nb_rx_desc = nb_desc;
316 rxq->rx_free_thresh = rx_free_thresh;
317 rxq->queue_id = queue_idx;
318 rxq->port_id = dev->data->port_id;
319 rxq->crc_len = 0; /* crc stripping by default */
320 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
323 len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
324 rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
326 /* Allocate the software ring. */
327 len = nb_desc + AVF_RX_MAX_BURST;
329 rte_zmalloc_socket("avf rx sw ring",
330 sizeof(struct rte_mbuf *) * len,
334 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
339 /* Allocate the maximum number of RX ring hardware descriptors with
340 * a little more to support bulk allocation.
342 len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
343 ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
345 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
346 ring_size, AVF_RING_BASE_ALIGN,
349 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
350 rte_free(rxq->sw_ring);
354 /* Zero all the descriptors in the ring. */
355 memset(mz->addr, 0, ring_size);
356 rxq->rx_ring_phys_addr = mz->iova;
357 rxq->rx_ring = (union avf_rx_desc *)mz->addr;
362 dev->data->rx_queues[queue_idx] = rxq;
363 rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
364 rxq->ops = &def_rxq_ops;
366 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
367 if (check_rx_vec_allow(rxq) == FALSE)
368 ad->rx_vec_allowed = false;
374 avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
377 unsigned int socket_id,
378 const struct rte_eth_txconf *tx_conf)
380 struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
381 struct avf_adapter *ad =
382 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
383 struct avf_tx_queue *txq;
384 const struct rte_memzone *mz;
386 uint16_t tx_rs_thresh, tx_free_thresh;
387 uint16_t i, base, bsf, tc_mapping;
389 PMD_INIT_FUNC_TRACE();
391 if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
392 nb_desc > AVF_MAX_RING_DESC ||
393 nb_desc < AVF_MIN_RING_DESC) {
394 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
399 tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
400 tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
401 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
402 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
403 check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh);
405 /* Free memory if needed. */
406 if (dev->data->tx_queues[queue_idx]) {
407 avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
408 dev->data->tx_queues[queue_idx] = NULL;
411 /* Allocate the TX queue data structure. */
412 txq = rte_zmalloc_socket("avf txq",
413 sizeof(struct avf_tx_queue),
417 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
418 "tx queue structure");
422 txq->nb_tx_desc = nb_desc;
423 txq->rs_thresh = tx_rs_thresh;
424 txq->free_thresh = tx_free_thresh;
425 txq->queue_id = queue_idx;
426 txq->port_id = dev->data->port_id;
427 txq->txq_flags = tx_conf->txq_flags;
428 txq->tx_deferred_start = tx_conf->tx_deferred_start;
430 /* Allocate software ring */
432 rte_zmalloc_socket("avf tx sw ring",
433 sizeof(struct avf_tx_entry) * nb_desc,
437 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
442 /* Allocate TX hardware ring descriptors. */
443 ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
444 ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
445 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
446 ring_size, AVF_RING_BASE_ALIGN,
449 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
450 rte_free(txq->sw_ring);
454 txq->tx_ring_phys_addr = mz->iova;
455 txq->tx_ring = (struct avf_tx_desc *)mz->addr;
460 dev->data->tx_queues[queue_idx] = txq;
461 txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
462 txq->ops = &def_txq_ops;
464 #ifdef RTE_LIBRTE_AVF_INC_VECTOR
465 if (check_tx_vec_allow(txq) == FALSE)
466 ad->tx_vec_allowed = false;
473 avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
475 struct avf_adapter *adapter =
476 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
477 struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
478 struct avf_rx_queue *rxq;
481 PMD_DRV_FUNC_TRACE();
483 if (rx_queue_id >= dev->data->nb_rx_queues)
486 rxq = dev->data->rx_queues[rx_queue_id];
488 err = alloc_rxq_mbufs(rxq);
490 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
496 /* Init the RX tail register. */
497 AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
500 /* Ready to switch the queue on */
501 err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
503 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
506 dev->data->rx_queue_state[rx_queue_id] =
507 RTE_ETH_QUEUE_STATE_STARTED;
513 avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
515 struct avf_adapter *adapter =
516 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
517 struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518 struct avf_tx_queue *txq;
521 PMD_DRV_FUNC_TRACE();
523 if (tx_queue_id >= dev->data->nb_tx_queues)
526 txq = dev->data->tx_queues[tx_queue_id];
528 /* Init the TX tail register. */
529 AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
532 /* Ready to switch the queue on */
533 err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
536 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
539 dev->data->tx_queue_state[tx_queue_id] =
540 RTE_ETH_QUEUE_STATE_STARTED;
546 avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
548 struct avf_adapter *adapter =
549 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
550 struct avf_rx_queue *rxq;
553 PMD_DRV_FUNC_TRACE();
555 if (rx_queue_id >= dev->data->nb_rx_queues)
558 err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
560 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
565 rxq = dev->data->rx_queues[rx_queue_id];
566 rxq->ops->release_mbufs(rxq);
568 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
574 avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
576 struct avf_adapter *adapter =
577 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
578 struct avf_tx_queue *txq;
581 PMD_DRV_FUNC_TRACE();
583 if (tx_queue_id >= dev->data->nb_tx_queues)
586 err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
588 PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
593 txq = dev->data->tx_queues[tx_queue_id];
594 txq->ops->release_mbufs(txq);
596 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
602 avf_dev_rx_queue_release(void *rxq)
604 struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
609 q->ops->release_mbufs(q);
610 rte_free(q->sw_ring);
611 rte_memzone_free(q->mz);
616 avf_dev_tx_queue_release(void *txq)
618 struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
623 q->ops->release_mbufs(q);
624 rte_free(q->sw_ring);
625 rte_memzone_free(q->mz);
630 avf_stop_queues(struct rte_eth_dev *dev)
632 struct avf_adapter *adapter =
633 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
634 struct avf_rx_queue *rxq;
635 struct avf_tx_queue *txq;
638 /* Stop All queues */
639 ret = avf_disable_queues(adapter);
641 PMD_DRV_LOG(WARNING, "Failed to stop queues");
643 for (i = 0; i < dev->data->nb_tx_queues; i++) {
644 txq = dev->data->tx_queues[i];
647 txq->ops->release_mbufs(txq);
649 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
651 for (i = 0; i < dev->data->nb_rx_queues; i++) {
652 rxq = dev->data->rx_queues[i];
655 rxq->ops->release_mbufs(rxq);
657 dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
662 avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
664 if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
665 (1 << AVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
666 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
668 rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
674 /* Translate the rx descriptor status and error fields to pkt flags */
675 static inline uint64_t
676 avf_rxd_to_pkt_flags(uint64_t qword)
679 uint64_t error_bits = (qword >> AVF_RXD_QW1_ERROR_SHIFT);
681 #define AVF_RX_ERR_BITS 0x3f
683 /* Check if RSS_HASH */
684 flags = (((qword >> AVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
685 AVF_RX_DESC_FLTSTAT_RSS_HASH) ==
686 AVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
688 if (likely((error_bits & AVF_RX_ERR_BITS) == 0)) {
689 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
693 if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_IPE_SHIFT)))
694 flags |= PKT_RX_IP_CKSUM_BAD;
696 flags |= PKT_RX_IP_CKSUM_GOOD;
698 if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_L4E_SHIFT)))
699 flags |= PKT_RX_L4_CKSUM_BAD;
701 flags |= PKT_RX_L4_CKSUM_GOOD;
703 /* TODO: Oversize error bit is not processed here */
708 /* implement recv_pkts */
710 avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
712 volatile union avf_rx_desc *rx_ring;
713 volatile union avf_rx_desc *rxdp;
714 struct avf_rx_queue *rxq;
715 union avf_rx_desc rxd;
716 struct rte_mbuf *rxe;
717 struct rte_eth_dev *dev;
718 struct rte_mbuf *rxm;
719 struct rte_mbuf *nmb;
723 uint16_t rx_packet_len;
724 uint16_t rx_id, nb_hold;
727 static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
729 [1] = RTE_PTYPE_L2_ETHER,
730 /* [2] - [21] reserved */
731 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
733 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
734 RTE_PTYPE_L4_NONFRAG,
735 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
738 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
740 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
742 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
744 /* All others reserved */
750 rx_id = rxq->rx_tail;
751 rx_ring = rxq->rx_ring;
753 while (nb_rx < nb_pkts) {
754 rxdp = &rx_ring[rx_id];
755 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
756 rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
757 AVF_RXD_QW1_STATUS_SHIFT;
759 /* Check the DD bit first */
760 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
762 AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
764 nmb = rte_mbuf_raw_alloc(rxq->mp);
765 if (unlikely(!nmb)) {
766 dev = &rte_eth_devices[rxq->port_id];
767 dev->data->rx_mbuf_alloc_failed++;
768 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
769 "queue_id=%u", rxq->port_id, rxq->queue_id);
775 rxe = rxq->sw_ring[rx_id];
777 if (unlikely(rx_id == rxq->nb_rx_desc))
780 /* Prefetch next mbuf */
781 rte_prefetch0(rxq->sw_ring[rx_id]);
783 /* When the next RX descriptor is on a cache line boundary,
784 * prefetch the next 4 RX descriptors and next 8 pointers
787 if ((rx_id & 0x3) == 0) {
788 rte_prefetch0(&rx_ring[rx_id]);
789 rte_prefetch0(rxq->sw_ring[rx_id]);
794 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
795 rxdp->read.hdr_addr = 0;
796 rxdp->read.pkt_addr = dma_addr;
798 rx_packet_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
799 AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
801 rxm->data_off = RTE_PKTMBUF_HEADROOM;
802 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
805 rxm->pkt_len = rx_packet_len;
806 rxm->data_len = rx_packet_len;
807 rxm->port = rxq->port_id;
809 avf_rxd_to_vlan_tci(rxm, &rxd);
810 pkt_flags = avf_rxd_to_pkt_flags(qword1);
812 ptype_tbl[(uint8_t)((qword1 &
813 AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
815 if (pkt_flags & PKT_RX_RSS_HASH)
817 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
819 rxm->ol_flags |= pkt_flags;
821 rx_pkts[nb_rx++] = rxm;
823 rxq->rx_tail = rx_id;
825 /* If the number of free RX descriptors is greater than the RX free
826 * threshold of the queue, advance the queue's receive tail register.
827 * Update that register with the value of the last processed RX
828 * descriptor minus 1.
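 * Example (illustrative): with rx_free_thresh = 32, the tail register is
 * written only once more than 32 descriptors have been processed and
 * refilled, batching the MMIO write instead of issuing it per packet.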
830 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
831 if (nb_hold > rxq->rx_free_thresh) {
832 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
833 "nb_hold=%u nb_rx=%u",
834 rxq->port_id, rxq->queue_id,
835 rx_id, nb_hold, nb_rx);
836 rx_id = (uint16_t)((rx_id == 0) ?
837 (rxq->nb_rx_desc - 1) : (rx_id - 1));
838 AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
841 rxq->nb_rx_hold = nb_hold;
846 /* implement recv_scattered_pkts */
848 avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
851 struct avf_rx_queue *rxq = rx_queue;
852 union avf_rx_desc rxd;
853 struct rte_mbuf *rxe;
854 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
855 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
856 struct rte_mbuf *nmb, *rxm;
857 uint16_t rx_id = rxq->rx_tail;
858 uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
859 struct rte_eth_dev *dev;
865 volatile union avf_rx_desc *rx_ring = rxq->rx_ring;
866 volatile union avf_rx_desc *rxdp;
867 static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
869 [1] = RTE_PTYPE_L2_ETHER,
870 /* [2] - [21] reserved */
871 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
873 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
874 RTE_PTYPE_L4_NONFRAG,
875 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
878 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
880 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
882 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
884 /* All others reserved */
887 while (nb_rx < nb_pkts) {
888 rxdp = &rx_ring[rx_id];
889 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
890 rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
891 AVF_RXD_QW1_STATUS_SHIFT;
893 /* Check the DD bit */
894 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
896 AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
898 nmb = rte_mbuf_raw_alloc(rxq->mp);
899 if (unlikely(!nmb)) {
900 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
901 "queue_id=%u", rxq->port_id, rxq->queue_id);
902 dev = &rte_eth_devices[rxq->port_id];
903 dev->data->rx_mbuf_alloc_failed++;
909 rxe = rxq->sw_ring[rx_id];
911 if (rx_id == rxq->nb_rx_desc)
914 /* Prefetch next mbuf */
915 rte_prefetch0(rxq->sw_ring[rx_id]);
917 /* When the next RX descriptor is on a cache line boundary,
918 * prefetch the next 4 RX descriptors and next 8 pointers
921 if ((rx_id & 0x3) == 0) {
922 rte_prefetch0(&rx_ring[rx_id]);
923 rte_prefetch0(rxq->sw_ring[rx_id]);
929 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
931 /* Set data buffer address and data length of the mbuf */
932 rxdp->read.hdr_addr = 0;
933 rxdp->read.pkt_addr = dma_addr;
934 rx_packet_len = (qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
935 AVF_RXD_QW1_LENGTH_PBUF_SHIFT;
936 rxm->data_len = rx_packet_len;
937 rxm->data_off = RTE_PKTMBUF_HEADROOM;
939 /* If this is the first buffer of the received packet, set the
940 * pointer to the first mbuf of the packet and initialize its
941 * context. Otherwise, update the total length and the number
942 * of segments of the current scattered packet, and update the
943 * pointer to the last mbuf of the current packet.
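 * Example (illustrative): a packet spread over three descriptors is
 * assembled across three iterations of this loop; the first iteration
 * sets first_seg, the later ones chain onto last_seg, and only the
 * final (EOF) iteration fills in ol_flags, ptype and hash below.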
947 first_seg->nb_segs = 1;
948 first_seg->pkt_len = rx_packet_len;
951 (uint16_t)(first_seg->pkt_len +
953 first_seg->nb_segs++;
954 last_seg->next = rxm;
957 /* If this is not the last buffer of the received packet,
958 * update the pointer to the last mbuf of the current scattered
959 * packet and continue to parse the RX ring.
961 if (!(rx_status & (1 << AVF_RX_DESC_STATUS_EOF_SHIFT))) {
966 /* This is the last buffer of the received packet. If the CRC
967 * is not stripped by the hardware:
968 * - Subtract the CRC length from the total packet length.
969 * - If the last buffer only contains the whole CRC or a part
970 * of it, free the mbuf associated to the last buffer. If part
971 * of the CRC is also contained in the previous mbuf, subtract
972 * the length of that CRC part from the data length of the previous mbuf.
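 * Example (illustrative): with a 4-byte CRC, if the last buffer holds
 * only 2 bytes (all of them CRC), that mbuf is freed and the remaining
 * 2 CRC bytes are trimmed from the previous segment's data length.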
976 if (unlikely(rxq->crc_len > 0)) {
977 first_seg->pkt_len -= ETHER_CRC_LEN;
978 if (rx_packet_len <= ETHER_CRC_LEN) {
979 rte_pktmbuf_free_seg(rxm);
980 first_seg->nb_segs--;
982 (uint16_t)(last_seg->data_len -
983 (ETHER_CRC_LEN - rx_packet_len));
984 last_seg->next = NULL;
986 rxm->data_len = (uint16_t)(rx_packet_len -
990 first_seg->port = rxq->port_id;
991 first_seg->ol_flags = 0;
992 avf_rxd_to_vlan_tci(first_seg, &rxd);
993 pkt_flags = avf_rxd_to_pkt_flags(qword1);
994 first_seg->packet_type =
995 ptype_tbl[(uint8_t)((qword1 &
996 AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
998 if (pkt_flags & PKT_RX_RSS_HASH)
999 first_seg->hash.rss =
1000 rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
1002 first_seg->ol_flags |= pkt_flags;
1004 /* Prefetch data of first segment, if configured to do so. */
1005 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1006 first_seg->data_off));
1007 rx_pkts[nb_rx++] = first_seg;
1011 /* Record index of the next RX descriptor to probe. */
1012 rxq->rx_tail = rx_id;
1013 rxq->pkt_first_seg = first_seg;
1014 rxq->pkt_last_seg = last_seg;
1016 /* If the number of free RX descriptors is greater than the RX free
1017 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1018 * register. Update the RDT with the value of the last processed RX
1019 * descriptor minus 1, to guarantee that the RDT register is never
1020 * equal to the RDH register, which creates a "full" ring situation
1021 * from the hardware point of view.
1023 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1024 if (nb_hold > rxq->rx_free_thresh) {
1025 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1026 "nb_hold=%u nb_rx=%u",
1027 rxq->port_id, rxq->queue_id,
1028 rx_id, nb_hold, nb_rx);
1029 rx_id = (uint16_t)(rx_id == 0 ?
1030 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1031 AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1034 rxq->nb_rx_hold = nb_hold;
1040 avf_xmit_cleanup(struct avf_tx_queue *txq)
1042 struct avf_tx_entry *sw_ring = txq->sw_ring;
1043 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
1044 uint16_t nb_tx_desc = txq->nb_tx_desc;
1045 uint16_t desc_to_clean_to;
1046 uint16_t nb_tx_to_clean;
1048 volatile struct avf_tx_desc *txd = txq->tx_ring;
1050 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
1051 if (desc_to_clean_to >= nb_tx_desc)
1052 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
1054 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
1055 if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
1056 rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
1057 rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE)) {
1058 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
1059 "(port=%d queue=%d)", desc_to_clean_to,
1060 txq->port_id, txq->queue_id);
1064 if (last_desc_cleaned > desc_to_clean_to)
1065 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
1068 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
1071 txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
1073 txq->last_desc_cleaned = desc_to_clean_to;
1074 txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
1079 /* Check if the context descriptor is needed for TX offloading */
1080 static inline uint16_t
1081 avf_calc_context_desc(uint64_t flags)
1083 static uint64_t mask = PKT_TX_TCP_SEG;
1085 return (flags & mask) ? 1 : 0;
1089 avf_txd_enable_checksum(uint64_t ol_flags,
1091 uint32_t *td_offset,
1092 union avf_tx_offload tx_offload)
1095 *td_offset |= (tx_offload.l2_len >> 1) <<
1096 AVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1098 /* Enable L3 checksum offloads */
1099 if (ol_flags & PKT_TX_IP_CKSUM) {
1100 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
1101 *td_offset |= (tx_offload.l3_len >> 2) <<
1102 AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1103 } else if (ol_flags & PKT_TX_IPV4) {
1104 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4;
1105 *td_offset |= (tx_offload.l3_len >> 2) <<
1106 AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1107 } else if (ol_flags & PKT_TX_IPV6) {
1108 *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV6;
1109 *td_offset |= (tx_offload.l3_len >> 2) <<
1110 AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
1113 if (ol_flags & PKT_TX_TCP_SEG) {
1114 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
1115 *td_offset |= (tx_offload.l4_len >> 2) <<
1116 AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1120 /* Enable L4 checksum offloads */
1121 switch (ol_flags & PKT_TX_L4_MASK) {
1122 case PKT_TX_TCP_CKSUM:
1123 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
1124 *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
1125 AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1127 case PKT_TX_SCTP_CKSUM:
1128 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_SCTP;
1129 *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
1130 AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1132 case PKT_TX_UDP_CKSUM:
1133 *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_UDP;
1134 *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
1135 AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1142 /* set TSO context descriptor
1143 * support IP -> L4 and IP -> IP -> L4
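 * Example (illustrative): for a TSO mbuf with l2_len = 14, l3_len = 20,
 * l4_len = 20 and pkt_len = 9054, hdr_len is 54 and cd_tso_len is 9000;
 * tso_segsz supplies the MSS field of the context descriptor.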
1145 static inline uint64_t
1146 avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
1148 uint64_t ctx_desc = 0;
1149 uint32_t cd_cmd, hdr_len, cd_tso_len;
1151 if (!tx_offload.l4_len) {
1152 PMD_TX_LOG(DEBUG, "L4 length set to 0");
1156 /* in the case of a non-tunneling packet, the outer_l2_len and
1157 * outer_l3_len must be 0.
1159 hdr_len = tx_offload.l2_len +
1163 cd_cmd = AVF_TX_CTX_DESC_TSO;
1164 cd_tso_len = mbuf->pkt_len - hdr_len;
1165 ctx_desc |= ((uint64_t)cd_cmd << AVF_TXD_CTX_QW1_CMD_SHIFT) |
1166 ((uint64_t)cd_tso_len << AVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1167 ((uint64_t)mbuf->tso_segsz << AVF_TXD_CTX_QW1_MSS_SHIFT);
1172 /* Construct the tx flags */
1173 static inline uint64_t
1174 avf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
1177 return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
1178 ((uint64_t)td_cmd << AVF_TXD_QW1_CMD_SHIFT) |
1179 ((uint64_t)td_offset <<
1180 AVF_TXD_QW1_OFFSET_SHIFT) |
1182 AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
1183 ((uint64_t)td_tag <<
1184 AVF_TXD_QW1_L2TAG1_SHIFT));
1189 avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1191 volatile struct avf_tx_desc *txd;
1192 volatile struct avf_tx_desc *txr;
1193 struct avf_tx_queue *txq;
1194 struct avf_tx_entry *sw_ring;
1195 struct avf_tx_entry *txe, *txn;
1196 struct rte_mbuf *tx_pkt;
1197 struct rte_mbuf *m_seg;
1208 uint64_t buf_dma_addr;
1209 union avf_tx_offload tx_offload = {0};
1212 sw_ring = txq->sw_ring;
1214 tx_id = txq->tx_tail;
1215 txe = &sw_ring[tx_id];
1217 /* Check if the descriptor ring needs to be cleaned. */
1218 if (txq->nb_free < txq->free_thresh)
1219 avf_xmit_cleanup(txq);
1221 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1226 tx_pkt = *tx_pkts++;
1227 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1229 ol_flags = tx_pkt->ol_flags;
1230 tx_offload.l2_len = tx_pkt->l2_len;
1231 tx_offload.l3_len = tx_pkt->l3_len;
1232 tx_offload.l4_len = tx_pkt->l4_len;
1233 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1235 /* Calculate the number of context descriptors needed. */
1236 nb_ctx = avf_calc_context_desc(ol_flags);
1238 /* The number of descriptors that must be allocated for
1239 * a packet equals the number of segments of that
1240 * packet, plus one context descriptor if needed.
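 * Example (illustrative): a 3-segment packet that needs a TSO context
 * descriptor consumes 4 ring entries (3 data + 1 context).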
1242 nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
1243 tx_last = (uint16_t)(tx_id + nb_used - 1);
1246 if (tx_last >= txq->nb_tx_desc)
1247 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1249 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
1250 " tx_first=%u tx_last=%u",
1251 txq->port_id, txq->queue_id, tx_id, tx_last);
1253 if (nb_used > txq->nb_free) {
1254 if (avf_xmit_cleanup(txq)) {
1259 if (unlikely(nb_used > txq->rs_thresh)) {
1260 while (nb_used > txq->nb_free) {
1261 if (avf_xmit_cleanup(txq)) {
1270 /* Descriptor based VLAN insertion */
1271 if (ol_flags & PKT_TX_VLAN_PKT) {
1272 td_cmd |= AVF_TX_DESC_CMD_IL2TAG1;
1273 td_tag = tx_pkt->vlan_tci;
1276 /* According to the datasheet, bit 2 is reserved and must be
1281 /* Enable checksum offloading */
1282 if (ol_flags & AVF_TX_CKSUM_OFFLOAD_MASK)
1283 avf_txd_enable_checksum(ol_flags, &td_cmd,
1284 &td_offset, tx_offload);
1287 /* Setup TX context descriptor if required */
1288 volatile struct avf_tx_context_desc *ctx_txd =
1289 (volatile struct avf_tx_context_desc *)
1291 uint16_t cd_l2tag2 = 0;
1292 uint64_t cd_type_cmd_tso_mss =
1293 AVF_TX_DESC_DTYPE_CONTEXT;
1295 txn = &sw_ring[txe->next_id];
1296 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1298 rte_pktmbuf_free_seg(txe->mbuf);
1303 if (ol_flags & PKT_TX_TCP_SEG)
1304 cd_type_cmd_tso_mss |=
1305 avf_set_tso_ctx(tx_pkt, tx_offload);
1307 AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
1308 txe->last_id = tx_last;
1309 tx_id = txe->next_id;
1316 txn = &sw_ring[txe->next_id];
1319 rte_pktmbuf_free_seg(txe->mbuf);
1322 /* Setup TX Descriptor */
1323 slen = m_seg->data_len;
1324 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1325 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
1326 txd->cmd_type_offset_bsz = avf_build_ctob(td_cmd,
1331 AVF_DUMP_TX_DESC(txq, txd, tx_id);
1332 txe->last_id = tx_last;
1333 tx_id = txe->next_id;
1335 m_seg = m_seg->next;
1338 /* The last packet data descriptor needs End Of Packet (EOP) */
1339 td_cmd |= AVF_TX_DESC_CMD_EOP;
1340 txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
1341 txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
1343 if (txq->nb_used >= txq->rs_thresh) {
1344 PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
1345 "%4u (port=%d queue=%d)",
1346 tx_last, txq->port_id, txq->queue_id);
1348 td_cmd |= AVF_TX_DESC_CMD_RS;
1350 /* Update txq RS bit counters */
1354 txd->cmd_type_offset_bsz |=
1355 rte_cpu_to_le_64(((uint64_t)td_cmd) <<
1356 AVF_TXD_QW1_CMD_SHIFT);
1357 AVF_DUMP_TX_DESC(txq, txd, tx_id);
1363 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1364 txq->port_id, txq->queue_id, tx_id, nb_tx);
1366 AVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
1367 txq->tx_tail = tx_id;
1373 avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
1377 struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
1382 num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
1383 ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
1393 /* TX prep functions */
1395 avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1402 for (i = 0; i < nb_pkts; i++) {
1404 ol_flags = m->ol_flags;
1406 /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
1407 if (!(ol_flags & PKT_TX_TCP_SEG)) {
1408 if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
1409 rte_errno = EINVAL;
1412 } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
1413 (m->tso_segsz > AVF_MAX_TSO_MSS)) {
1414 /* An MSS outside this range is considered malicious */
1415 rte_errno = EINVAL;
1419 if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
1420 rte_errno = ENOTSUP;
1424 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1425 ret = rte_validate_tx_offload(m);
1431 ret = rte_net_intel_cksum_prepare(m);
1441 /* choose RX function */
1443 avf_set_rx_function(struct rte_eth_dev *dev)
1445 struct avf_adapter *adapter =
1446 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1447 struct avf_rx_queue *rxq;
1450 if (adapter->rx_vec_allowed) {
1451 if (dev->data->scattered_rx) {
1452 PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
1453 " (port=%d).", dev->data->port_id);
1454 dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
1456 PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
1457 " (port=%d).", dev->data->port_id);
1458 dev->rx_pkt_burst = avf_recv_pkts_vec;
1460 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1461 rxq = dev->data->rx_queues[i];
1464 avf_rxq_vec_setup(rxq);
1466 } else if (dev->data->scattered_rx) {
1467 PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
1468 dev->data->port_id);
1469 dev->rx_pkt_burst = avf_recv_scattered_pkts;
1471 PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
1472 dev->data->port_id);
1473 dev->rx_pkt_burst = avf_recv_pkts;
1477 /* choose TX function */
1479 avf_set_tx_function(struct rte_eth_dev *dev)
1481 struct avf_adapter *adapter =
1482 AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1483 struct avf_tx_queue *txq;
1486 if (adapter->tx_vec_allowed) {
1487 PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
1488 dev->data->port_id);
1489 dev->tx_pkt_burst = avf_xmit_pkts_vec;
1490 dev->tx_pkt_prepare = NULL;
1491 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1492 txq = dev->data->tx_queues[i];
1495 avf_txq_vec_setup(txq);
1498 PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
1499 dev->data->port_id);
1500 dev->tx_pkt_burst = avf_xmit_pkts;
1501 dev->tx_pkt_prepare = avf_prep_pkts;
1506 avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1507 struct rte_eth_rxq_info *qinfo)
1509 struct avf_rx_queue *rxq;
1511 rxq = dev->data->rx_queues[queue_id];
1513 qinfo->mp = rxq->mp;
1514 qinfo->scattered_rx = dev->data->scattered_rx;
1515 qinfo->nb_desc = rxq->nb_rx_desc;
1517 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1518 qinfo->conf.rx_drop_en = TRUE;
1519 qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1523 avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1524 struct rte_eth_txq_info *qinfo)
1526 struct avf_tx_queue *txq;
1528 txq = dev->data->tx_queues[queue_id];
1530 qinfo->nb_desc = txq->nb_tx_desc;
1532 qinfo->conf.tx_free_thresh = txq->free_thresh;
1533 qinfo->conf.tx_rs_thresh = txq->rs_thresh;
1534 qinfo->conf.txq_flags = txq->txq_flags;
1535 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1538 /* Get the number of used descriptors of a rx queue */
1540 avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
1542 #define AVF_RXQ_SCAN_INTERVAL 4
1543 volatile union avf_rx_desc *rxdp;
1544 struct avf_rx_queue *rxq;
1547 rxq = dev->data->rx_queues[queue_id];
1548 rxdp = &rxq->rx_ring[rxq->rx_tail];
1549 while ((desc < rxq->nb_rx_desc) &&
1550 ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
1551 AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
1552 (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
1553 /* Check the DD bit of one RX descriptor in each group of 4,
1554 * to avoid checking too frequently and degrading performance
1557 desc += AVF_RXQ_SCAN_INTERVAL;
1558 rxdp += AVF_RXQ_SCAN_INTERVAL;
1559 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1560 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1561 desc - rxq->nb_rx_desc]);
1568 avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
1570 struct avf_rx_queue *rxq = rx_queue;
1571 volatile uint64_t *status;
1575 if (unlikely(offset >= rxq->nb_rx_desc))
1578 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1579 return RTE_ETH_RX_DESC_UNAVAIL;
1581 desc = rxq->rx_tail + offset;
1582 if (desc >= rxq->nb_rx_desc)
1583 desc -= rxq->nb_rx_desc;
1585 status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
1586 mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
1587 << AVF_RXD_QW1_STATUS_SHIFT);
1589 return RTE_ETH_RX_DESC_DONE;
1591 return RTE_ETH_RX_DESC_AVAIL;
1595 avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
1597 struct avf_tx_queue *txq = tx_queue;
1598 volatile uint64_t *status;
1599 uint64_t mask, expect;
1602 if (unlikely(offset >= txq->nb_tx_desc))
1605 desc = txq->tx_tail + offset;
1606 /* go to next desc that has the RS bit */
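/* e.g. an offset landing on descriptor 40 with rs_thresh = 32 is rounded
 * up to descriptor 64 before its DTYPE field is checked (illustrative).
 */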
1607 desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
1609 if (desc >= txq->nb_tx_desc) {
1610 desc -= txq->nb_tx_desc;
1611 if (desc >= txq->nb_tx_desc)
1612 desc -= txq->nb_tx_desc;
1615 status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1616 mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
1617 expect = rte_cpu_to_le_64(
1618 AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
1619 if ((*status & mask) == expect)
1620 return RTE_ETH_TX_DESC_DONE;
1622 return RTE_ETH_TX_DESC_FULL;
1625 uint16_t __attribute__((weak))
1626 avf_recv_pkts_vec(__rte_unused void *rx_queue,
1627 __rte_unused struct rte_mbuf **rx_pkts,
1628 __rte_unused uint16_t nb_pkts)
1633 uint16_t __attribute__((weak))
1634 avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
1635 __rte_unused struct rte_mbuf **rx_pkts,
1636 __rte_unused uint16_t nb_pkts)
1641 uint16_t __attribute__((weak))
1642 avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
1643 __rte_unused struct rte_mbuf **tx_pkts,
1644 __rte_unused uint16_t nb_pkts)
1649 int __attribute__((weak))
1650 avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
1655 int __attribute__((weak))
1656 avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)