#include <rte_malloc.h>
#include "bnxt.h"
+#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
rte_free(txq->cp_ring->cp_ring_struct);
rte_free(txq->cp_ring);
+ rte_memzone_free(txq->mz);
+ txq->mz = NULL;
+
rte_free(txq);
bp->tx_queues[i] = NULL;
}
{
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_VLAN | PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
- PKT_TX_QINQ_PKT) ||
+ PKT_TX_QINQ) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
(txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
return true;
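Any of the offload flags above, or an active TruFlow CFA action, forces the long TX BD format. A hypothetical caller-side sketch of how the predicate could feed descriptor accounting (example_bd_count is illustrative, not a driver function):

static uint16_t example_bd_count(struct rte_mbuf *tx_pkt,
				 struct bnxt_tx_queue *txq)
{
	/* long BD path: one long BD plus a buffer BD per segment */
	if (bnxt_xmit_need_long_bd(tx_pkt, txq))
		return 1 + tx_pkt->nb_segs;
	/* plain L2 frame: a single short BD is enough */
	return 1;
}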
vlan_tag_flags = 0;
/* HW can accelerate only outer vlan in QinQ mode */
- if (tx_pkt->ol_flags & PKT_TX_QINQ_PKT) {
+ if (tx_pkt->ol_flags & PKT_TX_QINQ) {
vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
tx_pkt->vlan_tci_outer;
outer_tpid_bd = txq->bp->outer_tpid_bd &
BNXT_OUTER_TPID_BD_MASK;
vlan_tag_flags |= outer_tpid_bd;
- } else if (tx_pkt->ol_flags & PKT_TX_VLAN_PKT) {
+ } else if (tx_pkt->ol_flags & PKT_TX_VLAN) {
		/* shurd: Should this mask at
		 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
		 */
		vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				 tx_pkt->vlan_tci;
	}
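For reference, an application requests the offload handled above roughly as follows (tag values are placeholders; per the comment above, only the outer tag is inserted by the hardware in QinQ mode):

static void example_request_qinq(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_QINQ;	/* insert outer VLAN on Tx */
	m->vlan_tci_outer = 100;	/* outer (service) tag */
	m->vlan_tci = 200;		/* inner (customer) tag */
}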
/*
- * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
+ * Transmit completion function for use when RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
* is enabled.
*/
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
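RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE lets the completion path skip per-mbuf checks because the application guarantees that every mbuf on the queue comes from one mempool and has a reference count of 1. A minimal sketch under that contract (example_tx_cmp_fast and the done[] array are illustrative, not the driver's bookkeeping):

static void example_tx_cmp_fast(struct rte_mbuf **done, unsigned int n)
{
	/* one bulk put to the shared pool replaces n calls to
	 * rte_pktmbuf_free_seg()
	 */
	if (n)
		rte_mempool_put_bulk(done[0]->pool, (void **)done, n);
}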
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
+ uint32_t nb_tx_pkts = 0, cons, ring_mask, opaque;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t raw_cons = cpr->cp_raw_cons;
- uint32_t cons;
- uint32_t nb_tx_pkts = 0;
+ struct bnxt_ring *cp_ring_struct;
struct tx_cmpl *txcmp;
- struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
- struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
- uint32_t ring_mask = cp_ring_struct->ring_mask;
- uint32_t opaque = 0;
if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
return 0;
+ cp_ring_struct = cpr->cp_ring_struct;
+ ring_mask = cp_ring_struct->ring_mask;
+
do {
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
- rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
- ring_mask]);
- if (!CMPL_VALID(txcmp, cpr->valid))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
- opaque = rte_cpu_to_le_32(txcmp->opaque);
- NEXT_CMPL(cpr, cons, cpr->valid, 1);
- rte_prefetch0(&cp_desc_ring[cons]);
+
+ opaque = rte_le_to_cpu_32(txcmp->opaque);
		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
- raw_cons = cons;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
} while (nb_tx_pkts < ring_mask);
if (nb_tx_pkts) {
- if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
bnxt_tx_cmp_fast(txq, nb_tx_pkts);
else
bnxt_tx_cmp(txq, nb_tx_pkts);
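The bnxt_cpr_cmp_valid() call above replaces the old CMPL_VALID/NEXT_CMPL scheme: rather than tracking a separate cpr->valid flag, the expected phase of the completion's valid bit is derived from raw_cons. A sketch of the check this assumes, where the producer toggles CMPL_BASE_V each time it wraps the ring and ring size is ring_mask + 1, a power of two:

static inline bool example_cmp_valid(const struct cmpl_base *cmp,
				     uint32_t raw_cons, uint32_t ring_size)
{
	bool expected = !(raw_cons & ring_size);
	bool valid = !!(rte_le_to_cpu_32(cmp->info3_v) & CMPL_BASE_V);

	if (valid == expected) {
		/* don't read the rest of the descriptor until the
		 * valid bit has been observed
		 */
		rte_smp_rmb();
		return true;
	}
	return false;
}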
if (rc)
return rc;
+ bnxt_free_hwrm_tx_ring(bp, tx_queue_id);
+ rc = bnxt_alloc_hwrm_tx_ring(bp, tx_queue_id);
+ if (rc)
+ return rc;
+
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_started = true;
PMD_DRV_LOG(DEBUG, "Tx queue started\n");
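From the application side, the stop/start cycle that exercises this path uses the standard ethdev calls (port and queue ids are placeholders):

static int example_restart_txq(uint16_t port_id, uint16_t qid)
{
	int rc = rte_eth_dev_tx_queue_stop(port_id, qid);

	if (rc != 0)
		return rc;
	/* on bnxt this now frees and reallocates the HWRM Tx ring */
	return rte_eth_dev_tx_queue_start(port_id, qid);
}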