+#define EVENT_DATA1_FLAGS_MASK \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK
+
+#define EVENT_DATA1_FLAGS_MASTER_FUNC \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC
+
+#define EVENT_DATA1_FLAGS_RECOVERY_ENABLED \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED
+
+bool bnxt_is_recovery_enabled(struct bnxt *bp);
+bool bnxt_is_master_func(struct bnxt *bp);
+
+void bnxt_stop_rxtx(struct bnxt *bp);
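+
+/*
+ * Illustrative sketch only (not part of this patch): one way the flag
+ * masks above could be applied to the event_data1 word of an error
+ * recovery async event. The function name, the locals, and the
+ * BNXT_DOC_EXAMPLES guard are assumptions made for the example; the
+ * real handling lives in the driver's async event path.
+ */
+#ifdef BNXT_DOC_EXAMPLES
+static inline void
+bnxt_sketch_parse_recovery_flags(uint32_t event_data1)
+{
+ /* Set when this function is the master function for error recovery. */
+ bool master = !!(event_data1 & EVENT_DATA1_FLAGS_MASTER_FUNC);
+ /* Set when firmware-driven error recovery is enabled. */
+ bool enabled = !!(event_data1 & EVENT_DATA1_FLAGS_RECOVERY_ENABLED);
+
+ (void)master;
+ (void)enabled;
+}
+#endif /* BNXT_DOC_EXAMPLES */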
+
+/**
+ * Check the validity of a completion ring entry. If the entry is valid, a
+ * C11 __ATOMIC_ACQUIRE fence is issued to ensure that subsequent loads of
+ * fields in the completion cannot be reordered by the compiler or the CPU
+ * to occur before the load of the "valid" field.
+ *
+ * Note: the caller must not access any fields in the specified completion
+ * entry prior to calling this function.
+ *
+ * @param cmpl
+ * Pointer to an entry in the completion ring.
+ * @param raw_cons
+ * Raw consumer index of entry in completion ring.
+ * @param ring_size
+ * Size of completion ring.
+ *
+ * @return
+ * true if the completion entry is valid, false otherwise.
+ */
+static __rte_always_inline bool
+bnxt_cpr_cmp_valid(const void *cmpl, uint32_t raw_cons, uint32_t ring_size)
+{
+ const struct cmpl_base *c = cmpl;
+ bool expected, valid;
+
+ /*
+ * The "valid" bit written by the hardware toggles on each pass through
+ * the ring, so the value expected at this raw consumer index flips
+ * whenever raw_cons crosses a ring_size boundary.
+ */
+ expected = !(raw_cons & ring_size);
+ valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
+ if (valid == expected) {
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ return true;
+ }
+ return false;
+}
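+
+/*
+ * Illustrative sketch only (not part of this patch): a minimal poll loop
+ * built on bnxt_cpr_cmp_valid(). The parameters (desc_ring, raw_cons,
+ * ring_size) and the BNXT_DOC_EXAMPLES guard are assumptions for the
+ * example; the real driver keeps this state in its completion ring
+ * structures.
+ */
+#ifdef BNXT_DOC_EXAMPLES
+static inline uint32_t
+bnxt_sketch_poll(const struct cmpl_base *desc_ring, uint32_t raw_cons,
+  uint32_t ring_size)
+{
+ const struct cmpl_base *cmp;
+
+ for (;;) {
+  cmp = &desc_ring[raw_cons & (ring_size - 1)];
+  /*
+   * Only the "valid" field may be read before this check; the
+   * acquire fence inside bnxt_cpr_cmp_valid() makes the other
+   * fields of *cmp safe to read once it returns true.
+   */
+  if (!bnxt_cpr_cmp_valid(cmp, raw_cons, ring_size))
+   break;
+  /* ... process the completion pointed to by cmp ... */
+  raw_cons++;
+ }
+ return raw_cons;
+}
+#endif /* BNXT_DOC_EXAMPLES */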