int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_cp_ring_info *nqr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;
	ring->vmem_size = 0;
	ring->vmem = NULL;
+	if (BNXT_HAS_NQ(txq->bp)) {
+		nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
+					 sizeof(struct bnxt_cp_ring_info),
+					 RTE_CACHE_LINE_SIZE, socket_id);
+		if (nqr == NULL)
+			return -ENOMEM;
+
+		txq->nq_ring = nqr;
+
+		ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
+					  sizeof(struct bnxt_ring),
+					  RTE_CACHE_LINE_SIZE, socket_id);
+		if (ring == NULL)
+			return -ENOMEM;
+
+		nqr->cp_ring_struct = ring;
+		ring->ring_size = txr->tx_ring_struct->ring_size;
+		ring->ring_mask = ring->ring_size - 1;
+		ring->bd = (void *)nqr->cp_desc_ring;
+		ring->bd_dma = nqr->cp_desc_mapping;
+		ring->vmem_size = 0;
+		ring->vmem = NULL;
+	}
+
	return 0;
}
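
The ring_mask computed above relies on ring_size being a power of two, which lets producer/consumer indices wrap with a cheap mask instead of a modulo; bnxt rounds ring sizes up with rte_align32pow2() elsewhere in this function, outside this hunk. A minimal sketch of that idiom, with an illustrative helper name that is not part of the driver:

	#include <stdint.h>

	/* Advance a ring index, wrapping via the power-of-two mask. */
	static inline uint32_t ring_idx_next(uint32_t idx, uint32_t ring_mask)
	{
		return (idx + 1) & ring_mask; /* back to 0 once idx hits ring_mask */
	}
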
	TX_BD_LONG_FLAGS_LHINT_LT2K
};
+	if (unlikely(is_bnxt_in_error(txq->bp)))
+		return -EIO;
+
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
-				PKT_TX_TUNNEL_GENEVE))
+				PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST))
		long_bd = true;
	nr_bds = long_bd + tx_pkt->nb_segs;
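	/* Illustrative arithmetic, not part of the patch: a 3-segment packet
	 * with checksum offload takes the long-BD path (long_bd == 1), so
	 * nr_bds = 1 + 3 = 4; a single-segment packet with no offloads needs
	 * only nr_bds = 0 + 1 = 1 short BD.
	 */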
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
					 TX_BD_LONG_LFLAGS_T_IPID;
			hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
-					tx_pkt->l4_len + tx_pkt->outer_l2_len +
-					tx_pkt->outer_l3_len;
+					tx_pkt->l4_len;
+			hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
+				    tx_pkt->outer_l2_len +
+				    tx_pkt->outer_l3_len : 0;
			/* The BD's hdr_size field counts 16-bit units,
			 * not bytes; hence divide by 2.
			 */
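			/* Worked example, illustrative only: l2 14 + l3 20 +
			 * l4 20 = 54 bytes of headers is programmed into the
			 * BD as 54 / 2 = 27 16-bit units.
			 */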
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
+		} else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) ==
+			   PKT_TX_IEEE1588_TMST) {
+			/* PTP */
+			txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
+			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	return nb_tx_pkts;
}
+/*
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ */
+uint16_t
+bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
+		     struct rte_mbuf **tx_pkts __rte_unused,
+		     uint16_t nb_pkts __rte_unused)
+{
+	return 0;
+}
+
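
The swap described in the comment above typically amounts to redirecting the device's TX burst pointer; a sketch under that assumption follows, where bnxt_quiesce_tx is a hypothetical name (only bnxt_dummy_xmit_pkts and the tx_pkt_burst member come from the driver/DPDK):

	#include <rte_ethdev_driver.h>	/* driver-side view of struct rte_eth_dev */
	#include <rte_atomic.h>		/* rte_mb() */

	/* Prototype normally provided by bnxt_txr.h. */
	uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				      uint16_t nb_pkts);

	/* Hypothetical helper, not part of this patch: park the TX fast path
	 * before an unsafe control operation by diverting bursts to the
	 * no-op handler above.
	 */
	static void bnxt_quiesce_tx(struct rte_eth_dev *eth_dev)
	{
		eth_dev->tx_pkt_burst = bnxt_dummy_xmit_pkts;
		rte_mb();	/* make the swap visible to fast-path lcores */
	}
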
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+	int rc = 0;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+	int rc = 0;
+
+	rc = is_bnxt_in_error(bp);
+	if (rc)
+		return rc;
	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);