}
}
+/*********************************************************************
+ *
+ * TX functions
+ *
+ **********************************************************************/
+
+/*
+ * Check for descriptors with their DD bit set and free mbufs.
+ * Return the total number of buffers freed.
+ */
+static __rte_always_inline int
+txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
+{
+ struct txgbe_tx_entry *txep;
+ uint32_t status;
+ int i, nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].dw3;
+ if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
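+ /*
+ * The threshold descriptor is not done yet; if fewer than
+ * 2 * tx_free_thresh descriptors remain free, request a
+ * descriptor write-back flush from the hardware.
+ */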
+ if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
+ txgbe_set32_masked(txq->tdc_reg_addr,
+ TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
+ return 0;
+ }
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_free_thresh-1); this path always frees
+ * buffers in chunks of exactly tx_free_thresh descriptors
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
+ for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
+ /* free buffers one at a time */
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
+ txep->mbuf = NULL;
+
+ if (unlikely(m == NULL))
+ continue;
+
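+ /*
+ * Flush the local batch to its mempool when it is full or when
+ * the next mbuf comes from a different mempool.
+ */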
+ if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
+ (nb_free > 0 && m->pool != free[0]->pool)) {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free, nb_free);
+ nb_free = 0;
+ }
+
+ free[nb_free++] = m;
+ }
+
+ if (nb_free > 0)
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
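+ /* wrap tx_next_dd back to the end of the first tx_free_thresh chunk */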
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+
+ return txq->tx_free_thresh;
+}
+
+/* Populate 4 descriptors with data from 4 mbufs */
+static inline void
+tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+ }
+}
+
+/* Populate 1 descriptor with data from 1 mbuf */
+static inline void
+tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
+{
+ uint64_t buf_dma_addr;
+ uint32_t pkt_len;
+
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
+ pkt_len = (*pkts)->data_len;
+
+ /* write data to descriptor */
+ txdp->qw0 = cpu_to_le64(buf_dma_addr);
+ txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
+ TXGBE_TXD_DATLEN(pkt_len));
+ txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
+
+ rte_prefetch0(&(*pkts)->pool);
+}
+
+/*
+ * Fill H/W descriptor ring with mbuf data.
+ * Copy mbuf pointers to the S/W ring.
+ */
+static inline void
+txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ uint16_t nb_pkts)
+{
+ volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
+ struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
+ const int N_PER_LOOP = 4;
+ const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
+ int mainpart, leftover;
+ int i, j;
+
+ /*
+ * Process most of the packets in chunks of N pkts. Any
+ * leftover packets will get processed one at a time.
+ */
+ mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
+ leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
+ for (i = 0; i < mainpart; i += N_PER_LOOP) {
+ /* Copy N mbuf pointers to the S/W ring */
+ for (j = 0; j < N_PER_LOOP; ++j)
+ (txep + i + j)->mbuf = *(pkts + i + j);
+ tx4(txdp + i, pkts + i);
+ }
+
+ if (unlikely(leftover > 0)) {
+ for (i = 0; i < leftover; ++i) {
+ (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
+ tx1(txdp + mainpart + i, pkts + mainpart + i);
+ }
+ }
+}
+
+static inline uint16_t
+tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
+ uint16_t n = 0;
+
+ /*
+ * Begin scanning the H/W ring for done descriptors when the
+ * number of available descriptors drops below tx_free_thresh. For
+ * each done descriptor, free the associated buffer.
+ */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ txgbe_tx_free_bufs(txq);
+
+ /* Only use descriptors that are available */
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ /* Use exactly nb_pkts descriptors */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ /*
+ * At this point, we know there are enough descriptors in the
+ * ring to transmit all the packets. This assumes that each
+ * mbuf contains a single segment, and that no new offloads
+ * are expected, which would require a new context descriptor.
+ */
+
+ /*
+ * See if we're going to wrap-around. If so, handle the top
+ * of the descriptor ring first, then do the bottom. If not,
+ * the processing looks just like the "bottom" part anyway...
+ */
+ if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
+ n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
+ txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
+ txq->tx_tail = 0;
+ }
+
+ /* Fill H/W descriptor ring with mbuf data */
+ txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
+ txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
+
+ /*
+ * Check for wrap-around. This would only happen if we used
+ * up to the last descriptor in the ring, no more, no less.
+ */
+ if (txq->tx_tail >= txq->nb_tx_desc)
+ txq->tx_tail = 0;
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
+ (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
+
+ /* ensure descriptor writes are visible before updating the tail pointer */
+ rte_wmb();
+ txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+uint16_t
+txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ /* Transmit the whole burst at once if it fits within TX_MAX_BURST pkts */
+ if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
+ return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+
+ /* transmit more than the max burst, in chunks of TX_MAX_BURST */
+ nb_tx = 0;
+ while (nb_pkts) {
+ uint16_t ret, n;
+
+ n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
+ ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
+ nb_tx = (uint16_t)(nb_tx + ret);
+ nb_pkts = (uint16_t)(nb_pkts - ret);
+ if (ret < n)
+ break;
+ }
+
+ return nb_tx;
+}
+
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
void __rte_cold
txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
{
- RTE_SET_USED(dev);
- RTE_SET_USED(txq);
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (txq->offloads == 0 &&
+ txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
+ dev->tx_pkt_prepare = NULL;
+ }
}
uint64_t
#define TXGBE_RXD_HDRADDR(rxd, v) \
(((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
+/*****************************************************************************
+ * Transmit Descriptor
+ *****************************************************************************/
+/**
+ * Transmit Context Descriptor (TXGBE_TXD_TYP=CTXT)
+ **/
+struct txgbe_tx_ctx_desc {
+ __le32 dw0; /* w.vlan_macip_lens */
+ __le32 dw1; /* w.seqnum_seed */
+ __le32 dw2; /* w.type_tucmd_mlhl */
+ __le32 dw3; /* w.mss_l4len_idx */
+};
+
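+/*
+ * LS(val, shift, mask) and MS(shift, mask) are the bit-field helpers from
+ * the base code: a masked value shifted into position, and a mask placed
+ * at a bit position, respectively.
+ */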
+/* @txgbe_tx_ctx_desc.dw0 */
+#define TXGBE_TXD_IPLEN(v) LS(v, 0, 0x1FF) /* ip/fcoe header end */
+#define TXGBE_TXD_MACLEN(v) LS(v, 9, 0x7F) /* desc mac len */
+#define TXGBE_TXD_VLAN(v) LS(v, 16, 0xFFFF) /* vlan tag */
+
+/* @txgbe_tx_ctx_desc.dw1 */
+/*** bit 0-31, when TXGBE_TXD_DTYP_FCOE=0 ***/
+#define TXGBE_TXD_IPSEC_SAIDX(v) LS(v, 0, 0x3FF) /* ipsec SA index */
+#define TXGBE_TXD_ETYPE(v) LS(v, 11, 0x1) /* tunnel type */
+#define TXGBE_TXD_ETYPE_UDP LS(0, 11, 0x1)
+#define TXGBE_TXD_ETYPE_GRE LS(1, 11, 0x1)
+#define TXGBE_TXD_EIPLEN(v) LS(v, 12, 0x7F) /* tunnel ip header */
+#define TXGBE_TXD_DTYP_FCOE MS(16, 0x1) /* FCoE/IP descriptor */
+#define TXGBE_TXD_ETUNLEN(v) LS(v, 21, 0xFF) /* tunnel header */
+#define TXGBE_TXD_DECTTL(v) LS(v, 29, 0xF) /* decrease ip TTL */
+/*** bit 0-31, when TXGBE_TXD_DTYP_FCOE=1 ***/
+#define TXGBE_TXD_FCOEF_EOF_MASK MS(10, 0x3) /* FC EOF index */
+#define TXGBE_TXD_FCOEF_EOF_N LS(0, 10, 0x3) /* EOFn */
+#define TXGBE_TXD_FCOEF_EOF_T LS(1, 10, 0x3) /* EOFt */
+#define TXGBE_TXD_FCOEF_EOF_NI LS(2, 10, 0x3) /* EOFni */
+#define TXGBE_TXD_FCOEF_EOF_A LS(3, 10, 0x3) /* EOFa */
+#define TXGBE_TXD_FCOEF_SOF MS(12, 0x1) /* FC SOF index */
+#define TXGBE_TXD_FCOEF_PARINC MS(13, 0x1) /* Rel_Off in F_CTL */
+#define TXGBE_TXD_FCOEF_ORIE MS(14, 0x1) /* orientation end */
+#define TXGBE_TXD_FCOEF_ORIS MS(15, 0x1) /* orientation start */
+
+/* @txgbe_tx_ctx_desc.dw2 */
+#define TXGBE_TXD_IPSEC_ESPLEN(v) LS(v, 1, 0x1FF) /* ipsec ESP length */
+#define TXGBE_TXD_SNAP MS(10, 0x1) /* SNAP indication */
+#define TXGBE_TXD_TPID_SEL(v) LS(v, 11, 0x7) /* vlan tag index */
+#define TXGBE_TXD_IPSEC_ESP MS(14, 0x1) /* ipsec type: esp=1 ah=0 */
+#define TXGBE_TXD_IPSEC_ESPENC MS(15, 0x1) /* ESP encrypt */
+#define TXGBE_TXD_CTXT MS(20, 0x1) /* context descriptor */
+#define TXGBE_TXD_PTID(v) LS(v, 24, 0xFF) /* packet type */
+/* @txgbe_tx_ctx_desc.dw3 */
+#define TXGBE_TXD_DD MS(0, 0x1) /* descriptor done */
+#define TXGBE_TXD_IDX(v) LS(v, 4, 0x1) /* ctxt desc index */
+#define TXGBE_TXD_L4LEN(v) LS(v, 8, 0xFF) /* l4 header length */
+#define TXGBE_TXD_MSS(v) LS(v, 16, 0xFFFF) /* l4 MSS */
+
/**
* Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
**/
__le32 dw2; /* r.cmd_type_len, w.nxtseq_seed */
__le32 dw3; /* r.olinfo_status, w.status */
};
+/* @txgbe_tx_desc.qw0 */
+
+/* @txgbe_tx_desc.dw2 */
+#define TXGBE_TXD_DATLEN(v) ((0xFFFF & (v))) /* data buffer length */
+#define TXGBE_TXD_1588 ((0x1) << 19) /* IEEE1588 time stamp */
+#define TXGBE_TXD_DATA ((0x0) << 20) /* data descriptor */
+#define TXGBE_TXD_EOP ((0x1) << 24) /* End of Packet */
+#define TXGBE_TXD_FCS ((0x1) << 25) /* Insert FCS */
+#define TXGBE_TXD_LINKSEC ((0x1) << 26) /* Insert LinkSec */
+#define TXGBE_TXD_ECU ((0x1) << 28) /* forward to ECU */
+#define TXGBE_TXD_CNTAG ((0x1) << 29) /* insert CN tag */
+#define TXGBE_TXD_VLE ((0x1) << 30) /* insert VLAN tag */
+#define TXGBE_TXD_TSE ((0x1) << 31) /* transmit segmentation */
+
+#define TXGBE_TXD_FLAGS (TXGBE_TXD_FCS | TXGBE_TXD_EOP)
+
+/* @txgbe_tx_desc.dw3 */
+#define TXGBE_TXD_DD_UNUSED TXGBE_TXD_DD
+#define TXGBE_TXD_IDX_UNUSED(v) TXGBE_TXD_IDX(v)
+#define TXGBE_TXD_CC ((0x1) << 7) /* check context */
+#define TXGBE_TXD_IPSEC ((0x1) << 8) /* request ipsec offload */
+#define TXGBE_TXD_L4CS ((0x1) << 9) /* insert TCP/UDP/SCTP csum */
+#define TXGBE_TXD_IPCS ((0x1) << 10) /* insert IPv4 csum */
+#define TXGBE_TXD_EIPCS ((0x1) << 11) /* insert outer IP csum */
+#define TXGBE_TXD_MNGFLT ((0x1) << 12) /* enable management filter */
+#define TXGBE_TXD_PAYLEN(v) ((0x7FFFF & (v)) << 13) /* payload length */
#define RTE_PMD_TXGBE_TX_MAX_BURST 32
#define RTE_PMD_TXGBE_RX_MAX_BURST 32
+#define RTE_TXGBE_TX_MAX_FREE_BUF_SZ 64
#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \
sizeof(struct txgbe_rx_desc))
* this value.
*/
uint16_t tx_free_thresh;
+ uint16_t nb_tx_free; /**< number of free TX descriptors. */
+ uint16_t tx_next_dd; /**< next desc to scan for DD bit */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+ uint64_t offloads; /* Tx offload flags of DEV_TX_OFFLOAD_* */
const struct txgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
};