This patch addresses the following issues with Tx batching:
1. Tx stall observed in some conditions:
The batching code doesn't request a completion when only a partial
chain of packets is transmitted due to mbuf allocation errors. Because
of this, the Tx consumer index is not updated correctly and the queue
eventually hits a qfull condition. Fix this by requesting a completion
for the last packet in the partial chain that is transmitted
successfully (see the first sketch after this list).
2. Tx stall seen with jumbo frames:
With jumbo frames, the number of TxBDs per packet is greater than 1.
While setting up these additional BDs in bnxt_start_xmit(), the flags
field is set using the OR-assignment operator, so we end up reusing a
stale value of the flags field from a previous use of that descriptor.
This results in an invalid completion and eventually leads to a Tx
stall. Fix this by plainly assigning the flags field its correct value
(see the second sketch after this list).
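
A minimal self-contained sketch of the first fix. The struct bd ring,
enqueue_one() helper, and BD_FLAG_NO_CMPL constant are hypothetical
stand-ins for the driver's Tx ring, bnxt_start_xmit() and
TX_BD_LONG_FLAGS_NO_CMPL; it only illustrates clearing the
no-completion flag on the last successfully queued BD:

#include <stdint.h>
#include <stdio.h>

struct bd { uint16_t flags_type; };   /* hypothetical stand-in */
#define BD_FLAG_NO_CMPL 0x0080        /* hypothetical flag bit */

static int enqueue_one(struct bd *ring, uint16_t i, struct bd **last_bd)
{
	if (i == 2)
		return -1;            /* simulated mbuf allocation failure */
	ring[i].flags_type = BD_FLAG_NO_CMPL; /* batching: no completion */
	*last_bd = &ring[i];
	return 0;
}

int main(void)
{
	struct bd ring[8] = { {0} };
	struct bd *last_bd = NULL;
	uint16_t i;

	for (i = 0; i < 8; i++) {
		if (enqueue_one(ring, i, &last_bd)) {
			/* The fix: request a completion on the last BD
			 * actually queued so the consumer index advances.
			 */
			if (last_bd)
				last_bd->flags_type &= ~BD_FLAG_NO_CMPL;
			break;
		}
	}
	printf("queued %u packets, last BD flags 0x%04x\n",
	       (unsigned)i, last_bd ? (unsigned)last_bd->flags_type : 0u);
	return 0;
}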
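
And a minimal sketch of the second fix; the type/flag constants are
hypothetical stand-ins for the real BD flag bits. It shows how OR-ing
into a recycled descriptor leaks a stale bit, while plain assignment
resets the field:

#include <stdint.h>
#include <stdio.h>

#define BD_TYPE_SHORT   0x0001        /* hypothetical type bit */
#define BD_FLAG_NO_CMPL 0x0080        /* hypothetical flag bit */

int main(void)
{
	/* A recycled descriptor still carrying a stale NO_CMPL bit. */
	uint16_t recycled = BD_TYPE_SHORT | BD_FLAG_NO_CMPL;

	uint16_t buggy = recycled;
	buggy |= BD_TYPE_SHORT;         /* OR-assign: stale bit survives */

	uint16_t fixed = BD_TYPE_SHORT; /* plain assign: field is reset */

	printf("buggy 0x%04x, fixed 0x%04x\n",
	       (unsigned)buggy, (unsigned)fixed);
	return 0;
}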
Fixes: 5735eb241947 ("net/bnxt: support Tx batching")
Cc: stable@dpdk.org
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
 	uint8_t wthresh; /* Write-back threshold reg */
 	uint32_t ctx_curr; /* Hardware context states */
 	uint8_t tx_deferred_start; /* not in global dev start */
-	uint8_t cmpl_next; /* Next BD to trigger a compl */
 
 	struct bnxt *bp;
 	int index;
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
-static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
+static inline uint32_t bnxt_tx_bds_in_hw(struct bnxt_tx_queue *txq)
+{
+	return ((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
+		txq->tx_ring->tx_ring_struct->ring_mask);
+}
+
+static inline uint32_t bnxt_tx_avail(struct bnxt_tx_queue *txq)
 {
 	/* Tell compiler to fetch tx indices from memory. */
 	rte_compiler_barrier();
 
-	return txr->tx_ring_struct->ring_size -
-	    ((txr->tx_prod - txr->tx_cons) &
-	      txr->tx_ring_struct->ring_mask) - 1;
+	return ((txq->tx_ring->tx_ring_struct->ring_size -
+		 bnxt_tx_bds_in_hw(txq)) - 1);
 }
 static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
 				struct bnxt_tx_queue *txq,
 				uint16_t *coal_pkts,
-				uint16_t *cmpl_next)
+				uint16_t *cmpl_next,
+				struct tx_bd_long **last_txbd)
 {
 	struct bnxt_tx_ring_info *txr = txq->tx_ring;
 	struct tx_bd_long *txbd;
 	struct tx_bd_long_hi *txbd1 = NULL;
 	uint32_t vlan_tag_flags, cfa_action;
 	bool long_bd = false;
+	unsigned short nr_bds = 0;
 	struct rte_mbuf *m_seg;
 	struct bnxt_sw_tx_bd *tx_buf;
 	static const uint32_t lhint_arr[4] = {
 				   PKT_TX_TUNNEL_GENEVE))
 		long_bd = true;
 
-	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
-	tx_buf->mbuf = tx_pkt;
-	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
+	nr_bds = long_bd + tx_pkt->nb_segs;
+	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
+		return -ENOMEM;
 
 	/* Check if number of Tx descriptors is above HW limit */
-	if (unlikely(tx_buf->nr_bds > BNXT_MAX_TSO_SEGS)) {
+	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
 		PMD_DRV_LOG(ERR,
-			    "Num descriptors %d exceeds HW limit\n",
-			    tx_buf->nr_bds);
+			    "Num descriptors %d exceeds HW limit\n", nr_bds);
 		return -ENOSPC;
 	}
 
 	/* Check non zero data_len */
 	RTE_VERIFY(tx_pkt->data_len);
 
-	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
-		return -ENOMEM;
+	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
+	tx_buf->mbuf = tx_pkt;
+	tx_buf->nr_bds = nr_bds;
 	txbd = &txr->tx_desc_ring[txr->tx_prod];
 	txbd->opaque = *coal_pkts;
-	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
 	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
 	if (!*cmpl_next) {
 		txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;

 	else
 		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
 	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
+	*last_txbd = txbd;
 
 	if (long_bd) {
 		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
 		txbd = &txr->tx_desc_ring[txr->tx_prod];
 		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
-		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
+		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
 		txbd->len = m_seg->data_len;
 
 		m_seg = m_seg->next;
 	uint32_t ring_mask = cp_ring_struct->ring_mask;
 	uint32_t opaque = 0;
 
-	if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
-	     txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
+	if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
 	struct bnxt_tx_queue *txq = tx_queue;
 	uint16_t nb_tx_pkts = 0;
 	uint16_t coal_pkts = 0;
-	uint16_t cmpl_next = txq->cmpl_next;
+	uint16_t cmpl_next = 0;
+	struct tx_bd_long *last_txbd = NULL;
 
 	/* Handle TX completions */
 	bnxt_handle_tx_cp(txq);
 	/* Handle TX burst request */
 	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
 		int rc;
 
-		/* Request a completion on first and last packet */
+		/* Request a completion on the last packet */
 		cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
 		coal_pkts++;
 		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
-				     &coal_pkts, &cmpl_next);
+				     &coal_pkts, &cmpl_next, &last_txbd);
 		if (unlikely(rc)) {
-			/* Request a completion in next cycle */
-			txq->cmpl_next = 1;
+			/* Request a completion on the last successfully
+			 * enqueued packet
+			 */
+			if (last_txbd)
+				last_txbd->flags_type &=
+					~TX_BD_LONG_FLAGS_NO_CMPL;
 			break;
 		}
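
For reference, the arithmetic in the new bnxt_tx_bds_in_hw() /
bnxt_tx_avail() helpers works because the ring size is a power of two:
(tx_prod - tx_cons) & ring_mask gives the number of BDs in flight even
after the producer index wraps, and one slot is kept empty so a full
ring can be told apart from an empty one. A standalone sketch of the
same math, with made-up ring values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical ring state; ring size must be a power of two. */
	const uint32_t ring_size = 256;
	const uint32_t ring_mask = ring_size - 1;
	uint32_t tx_cons = 250;  /* consumer index */
	uint32_t tx_prod = 5;    /* producer has wrapped past the end */

	/* Modular subtraction: correct across wraparound. */
	uint32_t in_hw = (tx_prod - tx_cons) & ring_mask;
	/* One slot stays empty to distinguish full from empty. */
	uint32_t avail = ring_size - in_hw - 1;

	/* Prints: BDs in HW: 11, BDs available: 244 */
	printf("BDs in HW: %u, BDs available: %u\n",
	       (unsigned)in_hw, (unsigned)avail);
	return 0;
}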