#include <rte_malloc.h>
#include "bnxt.h"
-#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
return 0;
}
-static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
-{
- /* Tell compiler to fetch tx indices from memory. */
- rte_compiler_barrier();
-
- return txr->tx_ring_struct->ring_size -
- ((txr->tx_prod - txr->tx_cons) &
- txr->tx_ring_struct->ring_mask) - 1;
-}
-
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
struct bnxt_tx_queue *txq,
uint16_t *coal_pkts,
- uint16_t *cmpl_next)
+ struct tx_bd_long **last_txbd)
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ uint32_t outer_tpid_bd = 0;
struct tx_bd_long *txbd;
struct tx_bd_long_hi *txbd1 = NULL;
- uint32_t vlan_tag_flags, cfa_action;
+ uint32_t vlan_tag_flags, cfa_action = 0;
bool long_bd = false;
+ unsigned short nr_bds = 0;
struct rte_mbuf *m_seg;
struct bnxt_sw_tx_bd *tx_buf;
static const uint32_t lhint_arr[4] = {
TX_BD_LONG_FLAGS_LHINT_LT512,
TX_BD_LONG_FLAGS_LHINT_LT1K,
TX_BD_LONG_FLAGS_LHINT_LT2K,
TX_BD_LONG_FLAGS_LHINT_LT2K
};
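+ /* Refuse to transmit while the device is in an error state. */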
+ if (unlikely(is_bnxt_in_error(txq->bp)))
+ return -EIO;
+
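+ /* Offload flags and TruFlow CFA actions are carried in the second
+ * ("high") BD, so any of them forces the long BD format.
+ */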
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
- PKT_TX_TUNNEL_GENEVE))
+ PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
+ PKT_TX_QINQ_PKT) ||
+ (BNXT_TRUFLOW_EN(txq->bp) &&
+ (txq->bp->ulp_ctx->cfg_data->tx_cfa_action ||
+ txq->vfr_tx_cfa_action)))
long_bd = true;
- tx_buf = &txr->tx_buf_ring[txr->tx_prod];
- tx_buf->mbuf = tx_pkt;
- tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
+ nr_bds = long_bd + tx_pkt->nb_segs;
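+ /* Reserve ring space up front: one BD per mbuf segment plus the
+ * extra high BD when the long format is needed.
+ */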
+ if (unlikely(bnxt_tx_avail(txq) < nr_bds))
+ return -ENOMEM;
/* Check if number of Tx descriptors is above HW limit */
- if (unlikely(tx_buf->nr_bds > BNXT_MAX_TSO_SEGS)) {
+ if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
PMD_DRV_LOG(ERR,
- "Num descriptors %d exceeds HW limit\n",
- tx_buf->nr_bds);
+ "Num descriptors %d exceeds HW limit\n", nr_bds);
return -ENOSPC;
}
/* Check for non-zero data_len */
RTE_VERIFY(tx_pkt->data_len);
- if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
- return -ENOMEM;
+ tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+ tx_buf->mbuf = tx_pkt;
+ tx_buf->nr_bds = nr_bds;
txbd = &txr->tx_desc_ring[txr->tx_prod];
txbd->opaque = *coal_pkts;
- txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+ txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
- if (!*cmpl_next) {
- txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
- } else {
- *coal_pkts = 0;
- *cmpl_next = false;
- }
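+ /* Completions are coalesced: each BD is marked NO_CMPL here and
+ * bnxt_xmit_pkts() re-arms the completion on the last BD of the
+ * burst via *last_txbd.
+ */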
+ txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
txbd->len = tx_pkt->data_len;
if (tx_pkt->pkt_len >= 2014)
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
+ *last_txbd = txbd;
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
vlan_tag_flags = 0;
- cfa_action = 0;
- if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+
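+ /* Use the VF representor's CFA action when the queue has one;
+ * otherwise fall back to the port-wide TruFlow action.
+ */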
+ if (BNXT_TRUFLOW_EN(txq->bp)) {
+ if (txq->vfr_tx_cfa_action)
+ cfa_action = txq->vfr_tx_cfa_action;
+ else
+ cfa_action =
+ txq->bp->ulp_ctx->cfg_data->tx_cfa_action;
+ }
+
+ /* HW can accelerate only outer vlan in QinQ mode */
+ if (tx_buf->mbuf->ol_flags & PKT_TX_QINQ_PKT) {
+ vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
+ tx_buf->mbuf->vlan_tci_outer;
+ outer_tpid_bd = txq->bp->outer_tpid_bd &
+ BNXT_OUTER_TPID_BD_MASK;
+ vlan_tag_flags |= outer_tpid_bd;
+ } else if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
/* shurd: Should this mask at
* TX_BD_LONG_CFA_META_VLAN_VID_MASK?
*/
txbd1 = (struct tx_bd_long_hi *)&txr->tx_desc_ring[txr->tx_prod];
txbd1->lflags = 0;
txbd1->cfa_meta = vlan_tag_flags;
- txbd1->cfa_action = cfa_action;
+
+ if (BNXT_TRUFLOW_EN(txq->bp))
+ txbd1->cfa_action = cfa_action;
if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
uint16_t hdr_size;
txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
TX_BD_LONG_LFLAGS_T_IPID;
hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
- tx_pkt->l4_len + tx_pkt->outer_l2_len +
- tx_pkt->outer_l3_len;
+ tx_pkt->l4_len;
+ hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len : 0;
/* The BD expects hdr_size in 16-bit units rather than bytes,
* hence the divide by 2.
*/
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) ==
+ PKT_TX_IEEE1588_TMST) {
+ /* PTP */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
+ txbd1->mss = 0;
}
} else {
txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
RTE_VERIFY(m_seg->data_len);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
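+ /* Record each segment's mbuf in its own slot so bnxt_tx_cmp()
+ * can free segments individually through the bulk-free path.
+ */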
+ tx_buf->mbuf = m_seg;
txbd = &txr->tx_desc_ring[txr->tx_prod];
txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
- txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
m_seg = m_seg->next;
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
+ struct rte_mempool *pool = NULL;
+ struct rte_mbuf **free = txq->free;
uint16_t cons = txr->tx_cons;
+ unsigned int blk = 0;
int i, j;
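+ /* Batch freed mbufs per mempool in txq->free so each run can be
+ * returned with a single rte_mempool_put_bulk() call instead of
+ * one rte_pktmbuf_free() per segment.
+ */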
for (i = 0; i < nr_pkts; i++) {
- struct bnxt_sw_tx_bd *tx_buf;
struct rte_mbuf *mbuf;
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
+ unsigned short nr_bds = tx_buf->nr_bds;
- tx_buf = &txr->tx_buf_ring[cons];
- cons = RING_NEXT(txr->tx_ring_struct, cons);
- mbuf = tx_buf->mbuf;
- tx_buf->mbuf = NULL;
-
- /* EW - no need to unmap DMA memory? */
-
- for (j = 1; j < tx_buf->nr_bds; j++)
+ for (j = 0; j < nr_bds; j++) {
+ mbuf = tx_buf->mbuf;
+ tx_buf->mbuf = NULL;
cons = RING_NEXT(txr->tx_ring_struct, cons);
- rte_pktmbuf_free(mbuf);
+ tx_buf = &txr->tx_buf_ring[cons];
+ if (!mbuf) /* Skip the slot reserved for the long BD's high part */
+ continue;
+
+ mbuf = rte_pktmbuf_prefree_seg(mbuf);
+ if (unlikely(!mbuf))
+ continue;
+
+ /* EW - no need to unmap DMA memory? */
+
+ if (likely(mbuf->pool == pool)) {
+ /* Add mbuf to the bulk free array */
+ free[blk++] = mbuf;
+ } else {
+ /* Found an mbuf from a different pool. Free
+ * mbufs accumulated so far to the previous
+ * pool
+ */
+ if (likely(pool != NULL))
+ rte_mempool_put_bulk(pool,
+ (void *)free,
+ blk);
+
+ /* Start accumulating mbufs in a new pool */
+ free[0] = mbuf;
+ pool = mbuf->pool;
+ blk = 1;
+ }
+ }
}
+ if (blk)
+ rte_mempool_put_bulk(pool, (void *)free, blk);
txr->tx_cons = cons;
}
uint32_t ring_mask = cp_ring_struct->ring_mask;
uint32_t opaque = 0;
- if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
- txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
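+ /* Defer completion processing until at least tx_free_thresh BDs
+ * are outstanding in hardware.
+ */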
+ if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
return 0;
do {
if (nb_tx_pkts) {
bnxt_tx_cmp(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
- B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
+ bnxt_db_cq(cpr);
}
return nb_tx_pkts;
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct bnxt_tx_queue *txq = tx_queue;
+ int rc;
uint16_t nb_tx_pkts = 0;
uint16_t coal_pkts = 0;
- uint16_t cmpl_next = txq->cmpl_next;
+ struct bnxt_tx_queue *txq = tx_queue;
+ struct tx_bd_long *last_txbd = NULL;
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
/* Tx queue was stopped; wait for it to be restarted */
- if (txq->tx_deferred_start) {
+ if (unlikely(!txq->tx_started)) {
PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
return 0;
}
- txq->cmpl_next = 0;
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
- int rc;
-
- /* Request a completion on first and last packet */
- cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
coal_pkts++;
rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
- &coal_pkts, &cmpl_next);
+ &coal_pkts, &last_txbd);
- if (unlikely(rc)) {
- /* Request a completion in next cycle */
- txq->cmpl_next = 1;
+ if (unlikely(rc))
break;
- }
}
- if (nb_tx_pkts)
- B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
+ if (likely(nb_tx_pkts)) {
+ /* Request a completion on the last packet */
+ last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
+ bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);
+ }
return nb_tx_pkts;
}
+/*
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ */
+uint16_t
+bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
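+/* Usage sketch (illustrative, not part of this patch): the driver can
+ * install the dummy handler before an unsafe queue operation and
+ * restore the real one afterwards, e.g.
+ *
+ * eth_dev->tx_pkt_burst = bnxt_dummy_xmit_pkts;
+ * ... reconfigure or recover the queue ...
+ * eth_dev->tx_pkt_burst = bnxt_xmit_pkts;
+ *
+ * so a racing datapath thread sees a harmless no-op.
+ */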
+
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+ int rc = 0;
+
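+ /* Do not start the queue while the device is in an error state. */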
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- txq->tx_deferred_start = false;
+ txq->tx_started = true;
PMD_DRV_LOG(DEBUG, "Tx queue started\n");
return 0;
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt *bp = dev->data->dev_private;
struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- txq->tx_deferred_start = true;
+ txq->tx_started = false;
PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
return 0;