+ return tun_len;
+}
+
+uint16_t
+txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq;
+ struct txgbe_tx_entry *sw_ring;
+ struct txgbe_tx_entry *txe, *txn;
+ volatile struct txgbe_tx_desc *txr;
+ volatile struct txgbe_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union txgbe_tx_offload tx_offload;
+#ifdef RTE_LIB_SECURITY
+ uint8_t use_ipsec;
+#endif
+
+ tx_offload.data[0] = 0;
+ tx_offload.data[1] = 0;
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_xmit_cleanup(txq);
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIB_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
+ tx_pkt->packet_type);
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec) {
+ union txgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union txgbe_crypto_tx_desc_md *)
+ rte_security_dynfield(tx_pkt);
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
+ /* Decide whether to build a new context descriptor or reuse an existing one */
+ ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
+ /* Only allocate context descriptor if required */
+ new_ctx = (ctx == TXGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
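+ /* ctx now holds the HW context slot that the data descriptors
+ * of this packet will reference via TXGBE_TXD_IDX()
+ */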
+ }
+
+ /*
+ * Keep track of how many descriptors are used for this packet.
+ * This is always the number of segments plus the number of
+ * context descriptors (0 or 1) required to transmit the packet.
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (uint16_t)txq->port_id,
+ (uint16_t)txq->queue_id,
+ (uint32_t)pkt_len,
+ (uint16_t)tx_id,
+ (uint16_t)tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_free_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_free_thresh */
+ if (unlikely(nb_used > txq->tx_free_thresh)) {
+ PMD_TX_FREE_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_free_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_free_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (txgbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in all Data Descriptors:
+ * - TXGBE_TXD_DTYP_DATA
+ * - TXGBE_TXD_DCMD_DEXT
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - TXGBE_TXD_DCMD_IFCS
+ * - TXGBE_TXD_MAC_1588
+ * - TXGBE_TXD_DCMD_VLE
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - TXGBE_TXD_CMD_EOP
+ *
+ * The following bits can be set in any Data Descriptor, but
+ * are only set in the last Data Descriptor:
+ * - TXGBE_TXD_CMD_RS
+ */
+ cmd_type_len = TXGBE_TXD_FCS;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cmd_type_len |= TXGBE_TXD_1588;
+#endif
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, the paylen in the descriptor is not
+ * the packet length but the TCP payload length
+ */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ pkt_len -=
+ (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+ ? tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len : 0;
+ }
+
+ /*
+ * Setup the TX Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct txgbe_tx_ctx_desc *ctx_txd;
+
+ ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Set up the TX Advanced Data Descriptor.
+ * This path is taken whether a new context descriptor
+ * was set up above or an existing one is being reused.
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |=
+ tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= TXGBE_TXD_IDX(ctx);
+ }
+
+ olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec)
+ olinfo_status |= TXGBE_TXD_IPSEC;
+#endif
+
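+ /* Fill one Tx data descriptor per mbuf segment of the packet */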
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->dw3 = rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= TXGBE_TXD_EOP;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
+ }
+
+end_of_tx:
+
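+ /* Make the descriptor writes visible to the device before updating the tail */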
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
+ (uint16_t)tx_id, (uint16_t)nb_tx);
+ txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
+ * RX functions
+ *
+ **********************************************************************/
+/* @note: fix txgbe_dev_supported_ptypes_get() if any change is made here. */
+static inline uint32_t
+txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
+{
+ uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
+
+ ptid &= ptid_mask;
+
+ return txgbe_decode_ptype(ptid);
+}
+
+static inline uint64_t
+txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
+{
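+ /* Map the 4-bit RSS type from the descriptor into mbuf RSS/FDIR flags */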
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
+ 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+ 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+ PKT_RX_RSS_HASH, 0, 0, 0,
+ 0, 0, 0, PKT_RX_FDIR,
+ };
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+ 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+ int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
+ if (likely(-1 != etfid))
+ return ip_pkt_etqf_map[etfid] |
+ ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+ else
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#else
+ return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
+#endif
+}
+
+static inline uint64_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
+{
+ uint64_t pkt_flags;
+
+ /*
+ * Only check whether a VLAN is present.
+ * Do not check whether the L3/L4 Rx checksums were verified by the NIC;
+ * that can be found in the rte_eth_rxmode.offloads flags.
+ */
+ pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
+ vlan_flags & PKT_RX_VLAN_STRIPPED)
+ ? vlan_flags : 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & TXGBE_RXD_STAT_1588)
+ pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+ return pkt_flags;
+}
+
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+ uint64_t pkt_flags = 0;
+
+ /* checksum offload can't be disabled */
+ if (rx_status & TXGBE_RXD_STAT_IPCS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
+ ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_L4CS) {
+ pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
+ ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
+ }
+
+ if (rx_status & TXGBE_RXD_STAT_EIPCS &&
+ rx_status & TXGBE_RXD_ERR_EIPCS) {
+ pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ }
+
+#ifdef RTE_LIB_SECURITY
+ if (rx_status & TXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & TXGBE_RXD_ERR_SECERR)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
+ return pkt_flags;
+}
+
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD TXGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t pkt_len;
+ uint64_t pkt_flags;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint32_t pkt_info[LOOK_AHEAD];
+ int i, j, nb_rx = 0;
+ uint32_t status;
+
+ /* get references to current descriptor and S/W ring entry */
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+ rxep = &rxq->sw_ring[rxq->rx_tail];
+
+ status = rxdp->qw1.lo.status;
+ /* check to make sure there is at least 1 packet to receive */
+ if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ return 0;
+
+ /*
+ * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
+ * reference packets that are ready to be received.
+ */
+ for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
+ /* Read the status words of the next LOOK_AHEAD descriptors */
+ for (j = 0; j < LOOK_AHEAD; j++)
+ s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
+
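+ /* Order the status reads above before the descriptor field reads below */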
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+ /* Count how many contiguous descriptors have their DD bit set */
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
+
+ nb_rx += nb_dd;
+
+ /* Translate descriptor info to mbuf format */
+ for (j = 0; j < nb_dd; ++j) {
+ mb = rxep[j].mbuf;
+ pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+ rxq->crc_len;
+ mb->data_len = pkt_len;
+ mb->pkt_len = pkt_len;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
+
+ /* convert descriptor fields to rte mbuf flags */
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j],
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |=
+ txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
+ mb->ol_flags = pkt_flags;
+ mb->packet_type =
+ txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss =
+ rte_le_to_cpu_32(rxdp[j].qw0.dw1);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash =
+ rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id =
+ rte_le_to_cpu_16(rxdp[j].qw0.hi.ipid);
+ }
+ }
+
+ /* Move mbuf pointers from the S/W ring to the stage */
+ for (j = 0; j < LOOK_AHEAD; ++j)
+ rxq->rx_stage[i + j] = rxep[j].mbuf;
+
+ /* stop scanning when not all descriptors in this group were done */
+ if (nb_dd != LOOK_AHEAD)
+ break;
+ }
+
+ /* clear software ring entries so we can cleanup correctly */
+ for (i = 0; i < nb_rx; ++i)
+ rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
+
+ return nb_rx;
+}
+
+static inline int
+txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
+{
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *rxep;
+ struct rte_mbuf *mb;
+ uint16_t alloc_idx;
+ __le64 dma_addr;
+ int diag, i;
+
+ /* allocate buffers in bulk directly into the S/W ring */
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
+ rxep = &rxq->sw_ring[alloc_idx];
+ diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
+ rxq->rx_free_thresh);
+ if (unlikely(diag != 0))
+ return -ENOMEM;
+
+ rxdp = &rxq->rx_ring[alloc_idx];
+ for (i = 0; i < rxq->rx_free_thresh; ++i) {
+ /* populate the static rte mbuf fields */
+ mb = rxep[i].mbuf;
+ if (reset_mbuf)
+ mb->port = rxq->port_id;
+
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+
+ /* populate the descriptors */
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
+ TXGBE_RXD_HDRADDR(&rxdp[i], 0);
+ TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
+ }
+
+ /* update state of internal queue structure */
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
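+ /* when the trigger passes the end of the ring, wrap it back to the
+ * end of the first block
+ */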
+ if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
+
+ /* no errors */
+ return 0;
+}
+
+static inline uint16_t
+txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
+ int i;
+
+ /* how many packets are ready to return? */
+ nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
+
+ /* copy mbuf pointers to the application's packet list */
+ for (i = 0; i < nb_pkts; ++i)
+ rx_pkts[i] = stage[i];
+
+ /* update internal queue state */
+ rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
+ rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
+
+ return nb_pkts;
+}
+
+static inline uint16_t
+txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ uint16_t nb_rx = 0;
+
+ /* Any previously recv'd pkts will be returned from the Rx stage */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ /* Scan the H/W ring for packets to receive */
+ nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
+
+ /* update internal queue state */
+ rxq->rx_next_avail = 0;
+ rxq->rx_nb_avail = nb_rx;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
+
+ /* if required, allocate new buffers to replenish descriptors */
+ if (rxq->rx_tail > rxq->rx_free_trigger) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
+ int i, j;
+
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (uint16_t)rxq->port_id,
+ (uint16_t)rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
+ /*
+ * Need to rewind any previous receives if we cannot
+ * allocate new buffers to replenish the old ones.
+ */
+ rxq->rx_nb_avail = 0;
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
+ for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
+ rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
+
+ return 0;
+ }
+
+ /* update tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
+ }
+
+ if (rxq->rx_tail >= rxq->nb_rx_desc)
+ rxq->rx_tail = 0;
+
+ /* received any packets this loop? */
+ if (rxq->rx_nb_avail)
+ return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+
+ return 0;
+}
+
+/* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
+uint16_t
+txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_rx;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
+ return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+
+ /* request is relatively large, chunk it up */
+ nb_rx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
+ ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+ nb_rx = (uint16_t)(nb_rx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_rx;
+}
+
+uint16_t
+txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_rx_queue *rxq;
+ volatile struct txgbe_rx_desc *rx_ring;
+ volatile struct txgbe_rx_desc *rxdp;
+ struct txgbe_rx_entry *sw_ring;
+ struct txgbe_rx_entry *rxe;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ struct txgbe_rx_desc rxd;
+ uint64_t dma_addr;
+ uint32_t staterr;
+ uint32_t pkt_info;
+ uint16_t pkt_len;
+ uint16_t rx_id;
+ uint16_t nb_rx;
+ uint16_t nb_hold;
+ uint64_t pkt_flags;
+
+ nb_rx = 0;
+ nb_hold = 0;
+ rxq = rx_queue;
+ rx_id = rxq->rx_tail;
+ rx_ring = rxq->rx_ring;
+ sw_ring = rxq->sw_ring;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ while (nb_rx < nb_pkts) {
+ /*
+ * The order of operations here is important as the DD status
+ * bit must not be read after any other descriptor fields.
+ * rx_ring and rxdp are pointing to volatile data so the order
+ * of accesses cannot be reordered by the compiler. If they were
+ * not volatile, they could be reordered which could lead to
+ * using invalid descriptor fields when read from rxd.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rxdp->qw1.lo.status;
+ if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
+ break;
+ rxd = *rxdp;
+
+ /*
+ * End of packet.
+ *
+ * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
+ * is likely to be invalid and to be dropped by the various
+ * validation checks performed by the network stack.
+ *
+ * Allocate a new mbuf to replenish the RX ring descriptor.
+ * If the allocation fails:
+ * - arrange for that RX descriptor to be the first one
+ * being parsed the next time the receive function is
+ * invoked [on the same queue].
+ *
+ * - Stop parsing the RX ring and return immediately.
+ *
+ * This policy does not drop the packet received in the RX
+ * descriptor for which the allocation of a new mbuf failed.
+ * Thus, it allows that packet to be later retrieved if
+ * mbufs have been freed in the meantime.
+ * As a side effect, holding RX descriptors instead of
+ * systematically giving them back to the NIC may lead to
+ * RX ring exhaustion situations.
+ * However, the NIC can gracefully prevent such situations
+ * from happening by sending specific "back-pressure" flow control
+ * frames to its peer(s).
+ */
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "ext_err_stat=0x%08x pkt_len=%u",
+ (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
+ (uint16_t)rx_id, (uint32_t)staterr,
+ (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+ "queue_id=%u", (uint16_t)rxq->port_id,
+ (uint16_t)rxq->queue_id);
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ rx_id++;
+ if (rx_id == rxq->nb_rx_desc)
+ rx_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[rx_id]);
+ rte_txgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma_addr);
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
+ rxq->crc_len);
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = pkt_len;
+ rxm->data_len = pkt_len;
+ rxm->port = rxq->port_id;
+
+ pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
+ /* Only valid if PKT_RX_VLAN set in pkt_flags */
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
+
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr,
+ rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ rxm->ol_flags = pkt_flags;
+ rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
+ } else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->hash.fdir.hash =
+ rte_le_to_cpu_16(rxd.qw0.hi.csum) &
+ TXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(rxd.qw0.hi.ipid);
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
+ (uint16_t)rx_id, (uint16_t)nb_hold,
+ (uint16_t)nb_rx);
+ rx_id = (uint16_t)((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ txgbe_set32(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+/**
+ * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ * @staterr Status/error word read from the descriptor
+ */
+static inline void
+txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
+ struct txgbe_rx_queue *rxq, uint32_t staterr)
+{
+ uint32_t pkt_info;
+ uint64_t pkt_flags;
+
+ head->port = rxq->port_id;
+
+ /* The vlan_tci field is only valid when PKT_RX_VLAN is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
+ pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
+ head->ol_flags = pkt_flags;
+ head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
+ rxq->pkt_type_mask);
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
+ head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
+ } else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
+ & TXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
+ }
+}
+
+/**
+ * txgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when RSC feature is configured. Uses an
+ * additional ring of txgbe_scattered_rx_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD txgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_sc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct txgbe_rx_queue *rxq = rx_queue;
+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+ volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
+ struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct txgbe_rx_entry *rxe;
+ struct txgbe_scattered_rx_entry *sc_entry;
+ struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
+ struct txgbe_rx_entry *next_rxe = NULL;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb = NULL;
+ struct txgbe_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile struct txgbe_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
+ /*
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
+ *
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in a general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * compiler barrier may just not be enough!
+ *
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
+ *
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway therefore the
+ * lines below will have to be revisited together with the rest
+ * of the txgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" and let the compiler do its job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
+
+ if (!(staterr & TXGBE_RXD_STAT_DD))
+ break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.qw1.hi.len));
+
+ if (!bulk_alloc) {
+ nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ } else if (nb_hold > rxq->rx_free_thresh) {
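+ /* Bulk allocation path: refill a whole block of descriptors and
+ * advance the tail once the previous block has been consumed.
+ */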
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!txgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ eop = staterr & TXGBE_RXD_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_txgbe_prefetch(sw_ring[next_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 4 pointers
+ * to mbufs.
+ */
+ if ((next_id & 0x3) == 0) {
+ rte_txgbe_prefetch(&rx_ring[next_id]);
+ rte_txgbe_prefetch(&sw_ring[next_id]);
+ }
+
+ rxm = rxe->mbuf;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the newly allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ TXGBE_RXD_HDRADDR(rxdp, 0);
+ TXGBE_RXD_PKTADDR(rxdp, dma);
+ } else {
+ rxe->mbuf = NULL;
+ }
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
+ rxm->data_len = data_len;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
+ nextp_id = TXGBE_RXD_NEXTP(staterr);
+ else
+ nextp_id = next_id;
+
+ next_sc_entry = &sw_sc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_txgbe_prefetch(next_rxe);
+ }
+
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ }
+
+ prev_id = rx_id;
+ rx_id = next_id;
+
+ /*
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_sc_ring and continue to parse the RX ring.
+ */
+ if (!eop && next_rxe) {
+ rxm->next = next_rxe->mbuf;
+ next_sc_entry->fbuf = first_seg;
+ goto next_desc;
+ }
+
+ /* Initialize the first mbuf of the returned packet */
+ txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
+
+ /*
+ * Deal with the case when HW CRC stripping is disabled.
+ * That can't happen when LRO is enabled, but still could
+ * happen for scattered RX mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
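+ /* The last segment carries only (part of) the CRC: free it and
+ * trim the remaining CRC bytes from the previous segment.
+ */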
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else {
+ rxm->data_len -= rxq->crc_len;
+ }
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
+uint64_t
+txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
+{
+ return DEV_RX_OFFLOAD_VLAN_STRIP;
+}
+
+uint64_t
+txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_RSS_HASH |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (!txgbe_is_vf(dev))
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by PF devices in a non-SR-IOV
+ * mode.
+ */
+ if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == txgbe_mac_raptor)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
+
+static void __rte_cold
+txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
+{
+ unsigned int i;
+
+ if (txq->sw_ring != NULL) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_ring[i].mbuf != NULL) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+}
+
+static int
+txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct txgbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+ /* Start freeing mbufs from the entry following tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
+ if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+ /* Loop through swr_ring to count the number of
+ * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (pkt_cnt < free_cnt) {
+ if (txgbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
+static int
+txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
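+ /* Round the request down to a multiple of tx_free_thresh; the simple
+ * Tx path releases buffers in blocks of that size.
+ */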
+ cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+ break;
+
+ n = txgbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
+int
+txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+ if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
+ return txgbe_tx_done_cleanup_simple(txq, free_cnt);
+
+ return txgbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
+static void __rte_cold
+txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
+{
+ if (txq != NULL &&
+ txq->sw_ring != NULL)
+ rte_free(txq->sw_ring);
+}
+
+static void __rte_cold
+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+{
+ if (txq != NULL && txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->free_swring(txq);
+ rte_free(txq);
+ }
+}
+
+void __rte_cold
+txgbe_dev_tx_queue_release(void *txq)
+{
+ txgbe_tx_queue_release(txq);
+}
+
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+ static const struct txgbe_tx_desc zeroed_desc = {0};
+ struct txgbe_tx_entry *txe = txq->sw_ring;
+ uint16_t prev, i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
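+ /* Mark the descriptor as done so the first cleanup pass sees it as free */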
+ txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+ txq->tx_tail = 0;
+
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
+static const struct txgbe_txq_ops def_txq_ops = {
+ .release_mbufs = txgbe_tx_queue_release_mbufs,
+ .free_swring = txgbe_tx_free_swring,
+ .reset = txgbe_reset_tx_queue,
+};
+
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void __rte_cold
+txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
+ PMD_INIT_LOG(DEBUG,
+ " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_free_thresh,
+ (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = txgbe_xmit_pkts;
+ dev->tx_pkt_prepare = txgbe_prep_pkts;
+ }
+}
+
+uint64_t
+txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_UDP_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (!txgbe_is_vf(dev))
+ tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
+
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
+int __rte_cold
+txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const struct rte_memzone *tz;
+ struct txgbe_tx_queue *txq;
+ struct txgbe_hw *hw;
+ uint16_t tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Validate number of transmit descriptors.
+ * It must not exceed the hardware maximum, and must be a multiple
+ * of TXGBE_ALIGN.
+ */
+ if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
+ nb_desc > TXGBE_RING_DESC_MAX ||
+ nb_desc < TXGBE_RING_DESC_MIN) {
+ return -EINVAL;
+ }
+
+ /*
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When set to zero use default values.
+ */
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
+ "TX descriptors minus 3. (tx_free_thresh=%u "
+ "port=%d queue=%d)",
+ (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ if ((nb_desc % tx_free_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
+ "number of TX descriptors. (tx_free_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_free_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL) {
+ txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc_socket("ethdev TX queue",
+ sizeof(struct txgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ return -ENOMEM;
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
+ TXGBE_ALIGN, socket_id);
+ if (tz == NULL) {
+ txgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->tx_free_thresh = tx_free_thresh;