#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_memzone.h>
+#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
+/* Bit mask to indicate which bits are required for building the TX context */
+static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
+ PKT_TX_OUTER_IPV6 |
+ PKT_TX_OUTER_IPV4 |
+ PKT_TX_IPV6 |
+ PKT_TX_IPV4 |
+ PKT_TX_VLAN_PKT |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG |
+ PKT_TX_TUNNEL_MASK |
+ PKT_TX_OUTER_IP_CKSUM);
+
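+/* TX offload flags that are not supported and are rejected by txgbe_prep_pkts() */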
+#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
+
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_txgbe_prefetch(p) rte_prefetch0(p)
+
static int
txgbe_is_vf(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
- switch (hw->mac.type) {
- case txgbe_mac_raptor_vf:
- return 1;
- default:
- return 0;
+ switch (hw->mac.type) {
+ case txgbe_mac_raptor_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*********************************************************************
+ *
+ * TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static __rte_always_inline int
+txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
+{
+ struct txgbe_tx_entry *txep;
+ uint32_t status;
+ int i, nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].dw3;
+ if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
+ if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
+ txgbe_set32_masked(txq->tdc_reg_addr,
+ TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+ return 0;
+ }
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_free_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
+ for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
+ /* free buffers one at a time */
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
+ txep->mbuf = NULL;
+
+ if (unlikely(m == NULL))
+ continue;
+
+ if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
+ (nb_free > 0 && m->pool != free[0]->pool)) {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free, nb_free);
+ nb_free = 0;
+ }
+
+ free[nb_free++] = m;
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+
+ return txq->tx_free_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = cpu_to_le64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
+ struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
+ mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j)
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
+ (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
+
+ /* update tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ /* Try to transmit at least chunks of TX_MAX_BURST pkts */
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
+
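+/*
+ * Populate a TX context descriptor from the given offload flags/values and
+ * remember the result in the queue's context cache for later reuse.
+ */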
+static inline void
+txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
+ volatile struct txgbe_tx_ctx_desc *ctx_txd,
+ uint64_t ol_flags, union txgbe_tx_offload tx_offload)
+{
+ union txgbe_tx_offload tx_offload_mask;
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+ uint32_t ctx_idx;
+ uint32_t vlan_macip_lens;
+ uint32_t tunnel_seed;
+
+ ctx_idx = txq->ctx_curr;
+ tx_offload_mask.data[0] = 0;
+ tx_offload_mask.data[1] = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
+ type_tucmd_mlhl = TXGBE_TXD_CTXT;
+
+ tx_offload_mask.ptid |= ~0;
+ type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
+
+ /* check if TCP segmentation is required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
+ mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ mss_l4len_idx |=
+ TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
+
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ tx_offload_mask.outer_tun_len |= ~0;
+ tx_offload_mask.outer_l2_len |= ~0;
+ tx_offload_mask.outer_l3_len |= ~0;
+ tx_offload_mask.l2_len |= ~0;
+ tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
+ tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
+
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non-UDP / non-GRE tunneling, leave the tunnel type as 0b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+ vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
+ } else {
+ tunnel_seed = 0;
+ vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
+ }
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_offload_mask.vlan_tci |= ~0;
+ vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
+ }
+
+ txq->ctx_cache[ctx_idx].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+ tx_offload_mask.data[0] & tx_offload.data[0];
+ txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+ tx_offload_mask.data[1] & tx_offload.data[1];
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
+ ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
+ ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
+ union txgbe_tx_offload tx_offload)
+{
+ /* If it matches the currently used context */
+ if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* Otherwise, check whether it matches the other (next) context */
+ txq->ctx_curr ^= 1;
+ if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* Mismatch: the caller must build a new context descriptor */
+ return TXGBE_CTX_NUM;
+}
+
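+/* Translate mbuf offload flags into the checksum bits of the data descriptor */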
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ uint32_t tmp = 0;
+
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_L4CS;
+ }
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_IPCS;
+ }
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ tmp |= TXGBE_TXD_CC;
+ tmp |= TXGBE_TXD_EIPCS;
+ }
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tmp |= TXGBE_TXD_CC;
+ /* implies IPv4 cksum */
+ if (ol_flags & PKT_TX_IPV4)
+ tmp |= TXGBE_TXD_IPCS;
+ tmp |= TXGBE_TXD_L4CS;
+ }
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ tmp |= TXGBE_TXD_CC;
+
+ return tmp;
+}
+
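+/* Translate mbuf offload flags into the command-type bits of the data descriptor */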
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype = 0;
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= TXGBE_TXD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= TXGBE_TXD_TSE;
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= TXGBE_TXD_LINKSEC;
+ return cmdtype;
+}
+
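+/*
+ * Derive the hardware packet type ID: use the mbuf packet_type when it is
+ * set, otherwise reconstruct a packet type from the offload flags.
+ */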
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+ bool tun;
+
+ if (ptype)
+ return txgbe_encode_ptype(ptype);
+
+ /* Only support flags in TXGBE_TX_OFFLOAD_MASK */
+ tun = !!(oflags & PKT_TX_TUNNEL_MASK);
+
+ /* L2 level */
+ ptype = RTE_PTYPE_L2_ETHER;
+ if (oflags & PKT_TX_VLAN)
+ ptype |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ /* L3 level */
+ if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
+ ptype |= RTE_PTYPE_L3_IPV4;
+ else if (oflags & (PKT_TX_OUTER_IPV6))
+ ptype |= RTE_PTYPE_L3_IPV6;
+
+ if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+ else if (oflags & (PKT_TX_IPV6))
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+
+ /* L4 level */
+ switch (oflags & (PKT_TX_L4_MASK)) {
+ case PKT_TX_TCP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+ break;
+ case PKT_TX_UDP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+ break;
+ }
+
+ if (oflags & PKT_TX_TCP_SEG)
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+
+ /* Tunnel */
+ switch (oflags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_VXLAN:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GRE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GENEVE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GENEVE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_VXLAN_GPE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN_GPE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_IPIP:
+ case PKT_TX_TUNNEL_IP:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_IP;
+ break;
+ }
+
+ return txgbe_encode_ptype(ptype);
+}
+
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+static inline int
+txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
+{
+ struct txgbe_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct txgbe_tx_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+ uint32_t status;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ status = txr[desc_to_clean_to].dw3;
+ if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
+ txgbe_set32_masked(txq->tdc_reg_addr,
+ TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_FREE_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].dw3 = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return 0;
+}
+
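+/* Return the length of the outer tunnel header(s) for the context descriptor */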
+static inline uint8_t
+txgbe_get_tun_len(struct rte_mbuf *mbuf)
+{
+ struct txgbe_genevehdr genevehdr;
+ const struct txgbe_genevehdr *gh;
+ uint8_t tun_len;
+
+ switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ tun_len = 0;
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_VXLAN_GPE:
+ tun_len = sizeof(struct txgbe_udphdr)
+ + sizeof(struct txgbe_vxlanhdr);
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ tun_len = sizeof(struct txgbe_nvgrehdr);
+ break;
+ case PKT_TX_TUNNEL_GENEVE:
+ gh = rte_pktmbuf_read(mbuf,
+ mbuf->outer_l2_len + mbuf->outer_l3_len,
+ sizeof(genevehdr), &genevehdr);
+ tun_len = sizeof(struct txgbe_udphdr)
+ + sizeof(struct txgbe_genevehdr)
+ + (gh->opt_len << 2);
+ break;
+ default:
+ tun_len = 0;
+ }
+
+ return tun_len;
+}
+
+uint16_t
+txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq;
+ struct txgbe_tx_entry *sw_ring;
+ struct txgbe_tx_entry *txe, *txn;
+ volatile struct txgbe_tx_desc *txr;
+ volatile struct txgbe_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union txgbe_tx_offload tx_offload;
+
+ tx_offload.data[0] = 0;
+ tx_offload.data[1] = 0;
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_xmit_cleanup(txq);
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
+ tx_pkt->packet_type);
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+
+ /* Check whether a new context descriptor is needed or an existing one can be reused */
+ ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
+ /* Only allocate context descriptor if required */
+ new_ctx = (ctx == TXGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop.
+ * This will always be the number of segments + the number of
+ * context descriptors required to transmit the packet.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (uint16_t)txq->port_id,
+ (uint16_t)txq->queue_id,
+ (uint32_t)pkt_len,
+ (uint16_t)tx_id,
+ (uint16_t)tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_free_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_free_thresh */
+ if (unlikely(nb_used > txq->tx_free_thresh)) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_free_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_free_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - TXGBE_TXD_DTYP_DATA
+ * - TXGBE_TXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - TXGBE_TXD_DCMD_IFCS
+ * - TXGBE_TXD_MAC_1588
+ * - TXGBE_TXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - TXGBE_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - TXGBE_TXD_CMD_RS
+ */
+ cmd_type_len = TXGBE_TXD_FCS;
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, the paylen in the descriptor is
+ * not the packet length but the TCP payload length
+ */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ pkt_len -=
+ (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+ ? tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len : 0;
+ }
+
+ /*
+ * Setup the TX Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct txgbe_tx_ctx_desc *ctx_txd;
+
+ ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload);
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Set up the TX Advanced Data Descriptor.
+ * This path is taken whether a new context descriptor
+ * was built or an existing one is reused.
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |=
+ tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= TXGBE_TXD_IDX(ctx);
+ }
+
+ olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->dw3 = rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= TXGBE_TXD_EOP;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
+ }
+
+end_of_tx:
+
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
+ (uint16_t)tx_id, (uint16_t)nb_tx);
+ txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+/* @note: fix txgbe_dev_supported_ptypes_get() if any change is made here. */
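+/* Decode the packet type ID from the RX descriptor into an RTE packet type */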
+static inline uint32_t
+txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
+{
+ uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
+
+ ptid &= ptid_mask;
+
+ return txgbe_decode_ptype(ptid);
+}
+
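+/* Map the RSS type field of the RX descriptor to RSS hash / FDIR mbuf flags */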
+static inline uint64_t
+txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR,
+ };
+
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Check only whether a VLAN is present.
+ * Do not check whether the L3/L4 Rx checksum was done by the NIC;
+ * that can be found from the rte_eth_rxmode.offloads flag.
+ */
+ pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
+ vlan_flags & PKT_RX_VLAN_STRIPPED)
+ ? vlan_flags : 0;
+
+ return pkt_flags;
+}
+
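+/* Translate checksum status/error bits of the RX descriptor into mbuf flags */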
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags = 0;
+
+ /* checksum offload can't be disabled */
+ if (rx_status & TXGBE_RXD_STAT_IPCS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
+ ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_L4CS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
+ ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_EIPCS &&
+ rx_status & TXGBE_RXD_ERR_EIPCS) {
+ pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+ return pkt_flags;
+}
+
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD TXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t pkt_flags;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint32_t pkt_info[LOOK_AHEAD];
+ int i, j, nb_rx = 0;
+ uint32_t status;
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ status = rxdp->qw1.lo.status;
+ /* check to make sure there is at least 1 packet to receive */
+ if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
+ /* Read desc statuses backwards to avoid race condition */
+ for (j = 0; j < LOOK_AHEAD; j++)
+ s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
+
+ rte_smp_rmb();
+
+ /* Compute how many contiguous descriptors have the DD bit set */
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+ rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
+
+ /* convert descriptor fields to rte mbuf flags */
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |=
+ txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
+ mb->ol_flags = pkt_flags;
+ mb->packet_type =
+ txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss =
+ rte_le_to_cpu_32(rxdp[j].qw0.dw1);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash =
+ rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id =
+ rte_le_to_cpu_16(rxdp[j].qw0.hi.ipid);
+ }
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j)
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+ /* stop if all requested packets could not be received */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
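+/* Allocate rx_free_thresh mbufs in bulk and attach them to the RX descriptors */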
+static inline int
+txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ __le64 dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return -ENOMEM;
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ if (reset_mbuf)
+ mb->port = rxq->port_id;
+
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* populate the descriptors */
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ TXGBE_RXD_HDRADDR(&rxdp[i], 0);
+ TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
+ }
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+ /* no errors */
+ return 0;
+}
+
+static inline uint16_t
+txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
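+/*
+ * Bulk-allocation receive: return staged packets first, then scan the HW
+ * ring, stage the ready mbufs and replenish the descriptors if needed.
+ */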
+static inline uint16_t
+txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ uint16_t nb_rx = 0;
+
+ /* Any previously recv'd pkts will be returned from the Rx stage */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
+ int i, j;
+
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (uint16_t)rxq->port_id,
+ (uint16_t)rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
+uint16_t
+txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
+ return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
+ ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq;
+ volatile struct txgbe_rx_desc *rx_ring;
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *sw_ring;
+ struct txgbe_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct txgbe_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t pkt_info;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->qw1.lo.status;
+ if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
+ * is likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be retrieved later if
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow
+ * control frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "ext_err_stat=0x%08x pkt_len=%u",
+ (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
+ (uint16_t)rx_id, (uint32_t)staterr,
+ (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (uint16_t)rxq->port_id,
+ (uint16_t)rxq->queue_id);
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[rx_id]);
+ rte_txgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma_addr);
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ rxm->ol_flags = pkt_flags;
+ rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
+ } else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->hash.fdir.hash =
+ rte_le_to_cpu_16(rxd.qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(rxd.qw0.hi.ipid);
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
+ (uint16_t)rx_id, (uint16_t)nb_hold,
+ (uint16_t)nb_rx);
+ rx_id = (uint16_t)((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ txgbe_set32(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+/**
+ * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ */
+static inline void
+txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
+ struct txgbe_rx_queue *rxq, uint32_t staterr)
+{
+ uint32_t pkt_info;
+ uint64_t pkt_flags;
+
+ head->port = rxq->port_id;
+
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
+ pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ head->ol_flags = pkt_flags;
+ head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
+ } else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
+ & TXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
+ }
+}
+
+/**
+ * txgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of txgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD txgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct txgbe_rx_queue *rxq = rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
+ struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct txgbe_rx_entry *rxe;
+ struct txgbe_scattered_rx_entry *sc_entry;
+ struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
+ struct txgbe_rx_entry *next_rxe = NULL;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb = NULL;
+ struct txgbe_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile struct txgbe_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
+ /*
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not to just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
+ *
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in a general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * compiler barrier may just not be enough!
+ *
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
+ *
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway therefore the
+ * lines below will have to be revisited together with the rest
+ * of the txgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" and let the compiler do its job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
+
+ if (!(staterr & TXGBE_RXD_STAT_DD))
+ break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ if (!bulk_alloc) {
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ } else if (nb_hold > rxq->rx_free_thresh) {
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!txgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ eop = staterr & TXGBE_RXD_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[next_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 4 pointers
+ * to mbufs.
+ */
+ if ((next_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[next_id]);
+ rte_txgbe_prefetch(&sw_ring[next_id]);
+ }
+
+ rxm = rxe->mbuf;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the newly allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma);
+ } else {
+ rxe->mbuf = NULL;
+ }
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
+ rxm->data_len = data_len;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
+ nextp_id = TXGBE_RXD_NEXTP(staterr);
+ else
+ nextp_id = next_id;
+
+ next_sc_entry = &sw_sc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_txgbe_prefetch(next_rxe);
+ }
+
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ }
+
+ prev_id = rx_id;
+ rx_id = next_id;
+
+ /*
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_sc_ring and continue to parse the RX ring.
+ */
+ if (!eop && next_rxe) {
+ rxm->next = next_rxe->mbuf;
+ next_sc_entry->fbuf = first_seg;
+ goto next_desc;
+ }
+
+ /* Initialize the first mbuf of the returned packet */
+ txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
+
+ /*
+ * Deal with the case when HW CRC strip is disabled.
+ * That can't happen when LRO is enabled, but still could
+ * happen for scattered RX mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else {
+ rxm->data_len -= rxq->crc_len;
+ }
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
}
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
}
-#ifndef DEFAULT_TX_FREE_THRESH
-#define DEFAULT_TX_FREE_THRESH 32
-#endif
+uint16_t
+txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
uint64_t
txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
txgbe_tx_queue_release(txq);
}
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+ static const struct txgbe_tx_desc zeroed_desc = {0};
+ struct txgbe_tx_entry *txe = txq->sw_ring;
+ uint16_t prev, i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+ txq->tx_tail = 0;
+
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
static const struct txgbe_txq_ops def_txq_ops = {
.release_mbufs = txgbe_tx_queue_release_mbufs,
.free_swring = txgbe_tx_free_swring,
+ .reset = txgbe_reset_tx_queue,
};
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
void __rte_cold
txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
{
- RTE_SET_USED(dev);
- RTE_SET_USED(txq);
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (txq->offloads == 0 &&
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
+ PMD_INIT_LOG(DEBUG,
+ " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_free_thresh,
+ (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = txgbe_xmit_pkts;
+ dev->tx_pkt_prepare = txgbe_prep_pkts;
+ }
}
uint64_t
}
void __rte_cold
-txgbe_set_rx_function(struct rte_eth_dev *dev)
+txgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
- RTE_SET_USED(dev);
+ unsigned int i;
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (txq != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq != NULL) {
+ txgbe_rx_queue_release_mbufs(rxq);
+ txgbe_reset_rx_queue(adapter, rxq);
+ }
+ }
+}
+
+void
+txgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
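+/* Allocate an mbuf for every RX descriptor and program its DMA address */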
+static int __rte_cold
+txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
+{
+ struct txgbe_rx_entry *rxe = rxq->sw_ring;
+ uint64_t dma_addr;
+ unsigned int i;
+
+ /* Initialize software ring entries */
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ volatile struct txgbe_rx_desc *rxd;
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+
+ if (mbuf == NULL) {
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
+ (unsigned int)rxq->queue_id);
+ return -ENOMEM;
+ }
+
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxd = &rxq->rx_ring[i];
+ TXGBE_RXD_HDRADDR(rxd, 0);
+ TXGBE_RXD_PKTADDR(rxd, dma_addr);
+ rxe[i].mbuf = mbuf;
+ }
+
+ return 0;
}
/**
return 0;
}
+void __rte_cold
+txgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+ /*
+ * Initialize the appropriate LRO callback.
+ *
+ * If all queues satisfy the bulk allocation preconditions
+ * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
+ * bulk allocation. Otherwise use a single allocation version.
+ */
+ if (dev->data->lro) {
+ if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
+ "allocation version");
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
+ "allocation version");
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+ }
+ } else if (dev->data->scattered_rx) {
+ /*
+ * Set the non-LRO scattered callback: there are bulk and
+ * single allocation versions.
+ */
+ if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered Rx callback with "
+ "bulk allocation (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
+ "single allocation) "
+ "Scattered Rx callback "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
+ }
+ /*
+ * Below we set "simple" callbacks according to port/queues parameters.
+ * If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = txgbe_recv_pkts;
+ }
+}
+
+/*
+ * Set up link loopback mode Tx->Rx.
+ */
+static inline void __rte_cold
+txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
+{
+ PMD_INIT_FUNC_TRACE();
+
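+ /*
+ * Force MAC receive loopback by setting the LB bit; the delay below
+ * gives the MAC time to apply the new configuration (the 50 ms settle
+ * time is assumed to be conservative).
+ */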
+ wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
+
+ msec_delay(50);
+}
+
+/*
+ * Start Transmit and Receive Units.
+ */
+int __rte_cold
+txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_rx_queue *rxq;
+ uint32_t dmatxctl;
+ uint32_t rxctrl;
+ uint16_t i;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Setup Transmit Threshold Registers */
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
+ TXGBE_TXCFG_HTHRESH_MASK |
+ TXGBE_TXCFG_WTHRESH_MASK,
+ TXGBE_TXCFG_HTHRESH(txq->hthresh) |
+ TXGBE_TXCFG_WTHRESH(txq->wthresh));
+ }
+
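+ /* Enable the Tx DMA engine globally before starting individual queues. */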
+ dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
+ dmatxctl |= TXGBE_DMATXCTRL_ENA;
+ wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq->tx_deferred_start) {
+ ret = txgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq->rx_deferred_start) {
+ ret = txgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Enable Receive engine */
+ rxctrl = rd32(hw, TXGBE_PBRXCTL);
+ rxctrl |= TXGBE_PBRXCTL_ENA;
+ hw->mac.enable_rx_dma(hw, rxctrl);
+
+ /* If loopback mode is enabled, set up the link accordingly */
+ if (hw->mac.type == txgbe_mac_raptor &&
+ dev->data->dev_conf.lpbk_mode)
+ txgbe_setup_loopback_link_raptor(hw);
+
+ return 0;
+}
+
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+ u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
+ *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+ u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
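+ /*
+ * Restore the saved registers but keep the queue disabled (ENA masked
+ * off) so that it is only re-enabled by an explicit queue start.
+ */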
+ wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+ wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+ wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+ u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+ *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+ *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+ u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
+ wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+ wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+ wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ /* Allocate buffers for descriptor rings */
+ if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
+ return -1;
+ }
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ rxdctl |= TXGBE_RXCFG_ENA;
+ wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
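+ /* Poll once per millisecond; the poll budget bounds the wait at
+ * roughly 10 ms (inferred from the constant name).
+ */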
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
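+ /*
+ * Publish the ring to hardware: the read pointer starts at entry 0 and
+ * the write pointer is set to the last descriptor, so the device owns
+ * every buffer just allocated.
+ */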
+ wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
+ wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+ struct txgbe_rx_queue *rxq;
+ uint32_t rxdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+ wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
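+ /* Allow in-flight DMA to complete before restoring the queue
+ * registers; the 100 us wait is assumed to be sufficient.
+ */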
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+ txgbe_rx_queue_release_mbufs(rxq);
+ txgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_tx_queue *txq;
+ uint32_t txdctl;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
+
+ /* Wait until TX Enable ready */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+
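+ /* Ensure all descriptor ring writes are visible to the device before
+ * the tail pointer is published.
+ */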
+ rte_wmb();
+ wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_tx_queue *txq;
+ uint32_t txdctl;
+ uint32_t txtdh, txtdt;
+ int poll_ms;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /* Wait until the Tx queue is empty: head (TXRP) catches up with tail (TXWP) */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+ txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
+
+ txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+ wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+ /* Wait until TX Enable bit clear */
+ poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+ } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
+
+ rte_delay_us(RTE_TXGBE_WAIT_100_US);
+ txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct txgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct txgbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+