#include <stdbool.h>
#include <rte_io.h>
+#include "hsi_struct_def_dpdk.h"
struct bnxt_db_info;
-#define CMP_VALID(cmp, raw_cons, ring) \
- (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \
- CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size)))
-
#define CMP_TYPE(cmp) \
(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
bool bnxt_is_master_func(struct bnxt *bp);
void bnxt_stop_rxtx(struct bnxt *bp);
+
+/**
+ * Check validity of a completion ring entry. If the entry is valid, issue a
+ * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * completion are not hoisted by the compiler or the CPU above the load of the
+ * "valid" field.
+ *
+ * Note: the caller must not access any fields in the specified completion
+ * entry prior to calling this function.
+ *
+ * @param cmpl
+ * Pointer to an entry in the completion ring.
+ * @param raw_cons
+ * Raw consumer index of entry in completion ring.
+ * @param ring_size
+ * Size of completion ring.
+ */
+static __rte_always_inline bool
+bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
+{
+ const struct cmpl_base *c = cmpl;
+ bool expected, valid;
+
+ expected = !(raw_cons & ring_size);
+ valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
+ if (valid == expected) {
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ return true;
+ }
+ return false;
+}
#endif
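
For reference, a minimal sketch of the calling pattern the helper is designed for. The `bnxt_poll_one` wrapper below is hypothetical (it does not exist in the driver), and the masking is simply what the driver's RING_CMP() macro expands to:

static inline int
bnxt_poll_one(struct bnxt_cp_ring_info *cpr, uint32_t ring_size)
{
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons = raw_cons & (ring_size - 1);
        struct cmpl_base *cmp = &cpr->cp_desc_ring[cons];

        if (!bnxt_cpr_cmp_valid(cmp, raw_cons, ring_size))
                return 0;       /* hardware has not posted a new entry */

        /*
         * Other completion fields may be read only after the valid
         * check: the acquire fence inside bnxt_cpr_cmp_valid() orders
         * these loads after the load of info3_v.
         */
        /* ... dispatch on CMP_TYPE(cmp) here ... */
        cpr->cp_raw_cons = raw_cons + 1;
        return 1;
}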
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt_cp_ring_info *cpr;
- uint32_t desc = 0, raw_cons;
+ uint32_t desc = 0, raw_cons, cp_ring_size;
struct bnxt_rx_queue *rxq;
struct rx_pkt_cmpl *rxcmp;
int rc;
rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
raw_cons = cpr->cp_raw_cons;
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
while (1) {
uint32_t agg_cnt, cons, cmpl_type;
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
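
The expected-phase computation in the helper works because the ring size is a power of two: the single bit that ring_size contributes to raw_cons flips once per full traversal of the ring, matching the sense of the valid bit that hardware inverts on each pass. A worked example, assuming a hypothetical 256-entry completion ring:

/*
 * ring_size = 256 (0x100), so expected = !(raw_cons & 0x100):
 *   raw_cons   0..255 -> raw_cons & 0x100 == 0     -> expect V == 1
 *   raw_cons 256..511 -> raw_cons & 0x100 == 0x100 -> expect V == 0
 *   raw_cons 512..767 -> expect V == 1 again, and so on.
 * A stale entry left over from the previous pass carries the opposite
 * valid-bit sense, so it can never satisfy the check.
 */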
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
- uint32_t desc, raw_cons;
+ uint32_t desc, raw_cons, cp_ring_size;
struct bnxt *bp = rxq->bp;
struct rx_pkt_cmpl *rxcmp;
int rc;
rxr = rxq->rx_ring;
cpr = rxq->cp_ring;
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
/*
* For the vector receive case, the completion at the requested
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
return RTE_ETH_RX_DESC_DONE;
/* Check whether rx desc has an mbuf attached. */
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons, cp_ring_size))
break;
cmpl_type = CMP_TYPE(rxcmp);
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t ring_mask, raw_cons, nb_tx_pkts = 0;
- struct bnxt_ring *cp_ring_struct;
struct cmpl_base *cp_desc_ring;
int rc;
raw_cons = cpr->cp_raw_cons;
cp_desc_ring = cpr->cp_desc_ring;
- cp_ring_struct = cpr->cp_ring_struct;
ring_mask = cpr->cp_ring_struct->ring_mask;
/* Check to see if hw has posted a completion for the descriptor. */
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
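
On the transmit paths the ring size is reconstructed from the mask instead of being read through cp_ring_struct, dropping a pointer dereference from the hot loop. For these power-of-two rings the two forms are interchangeable, as the sketch below illustrates:

/*
 * ring_mask == ring_size - 1 for power-of-two completion rings, e.g.
 * ring_size 0x200 -> ring_mask 0x1ff -> ring_mask + 1 == 0x200.
 * Passing ring_mask + 1 to bnxt_cpr_cmp_valid() is therefore
 * equivalent to passing cp_ring_struct->ring_size.
 */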
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = eth_dev->data->dev_private;
+ uint32_t cons, raw_cons, cp_ring_size;
struct bnxt_cp_ring_info *cpr;
struct cmpl_base *cmp;
- uint32_t raw_cons;
- uint32_t cons;
+
if (bp == NULL)
return;
return;
raw_cons = cpr->cp_raw_cons;
+ cp_ring_size = cpr->cp_ring_struct->ring_size;
pthread_mutex_lock(&bp->def_cp_lock);
while (1) {
if (!cpr || !cpr->cp_ring_struct || !cpr->cp_db.doorbell) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
cmp = &cpr->cp_desc_ring[cons];
- if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(cmp, raw_cons, cp_ring_size))
break;
bnxt_event_hwrm_resp_handler(bp, cmp);
raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
- return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+ return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
+ cpr->cp_ring_struct->ring_size);
}
/* TPA consume agg buffer out of order, allocate connected data only */
cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
- if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
+ cpr->cp_ring_struct->ring_size))
return -EBUSY;
if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
+ cpr->cp_ring_struct->ring_size))
break;
if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
PMD_DRV_LOG(ERR, "Rx flush done\n");
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
nb_tx_pkts += txcmp->opaque;
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
opaque = rte_le_to_cpu_32(txcmp->opaque);
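
To see what the acquire fence buys, consider the load of txcmp->opaque above. The comment sketch below describes the reordering that the fence-less CMP_VALID macro permitted on weakly ordered CPUs; the interleaving is illustrative:

/*
 * Hazard with the old CMP_VALID macro (no fence):
 *
 *   1. CPU speculatively loads txcmp->opaque (stale data left over
 *      from the previous pass around the ring).
 *   2. NIC DMA-writes the new completion, setting the valid bit in
 *      info3_v last.
 *   3. CPU loads info3_v, sees the new valid bit, and the check
 *      passes, yet the stale opaque from step 1 is consumed.
 *
 * The __ATOMIC_ACQUIRE fence in bnxt_cpr_cmp_valid() prevents the
 * later field loads from being satisfied ahead of the info3_v load,
 * closing this window.
 */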