There is a race condition while processing RAMROD
completion in fast path queue through interrupt handler
and polling method.
The interrupt handler, invoked both from an actual
interrupt event and from the RAMROD processing polling
flow, may create a situation where one flow reads and
clears a fastpath interrupt without actually processing
the RAMROD completion, thus causing a RAMROD timeout
even though the HW sent a completion event.
Fix this by introducing an atomic variable which will be
set only when the interrupt handler needs to process a
RAMROD completion.
Fixes: 540a211084a7 ("bnx2x: driver core")
Cc: stable@dpdk.org
Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
atomic_load_acq_long(&sc->cq_spq_left),
atomic_load_acq_long(&sc->eq_spq_left));
atomic_load_acq_long(&sc->cq_spq_left),
atomic_load_acq_long(&sc->eq_spq_left));
+ /* RAMROD completion is processed in bnx2x_intr_legacy()
+ * which can run from different contexts.
+ * Ask bnx2x_intr_legacy() to process RAMROD
+ * completion whenever it gets scheduled.
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
bnx2x_sp_prod_update(sc);
return 0;
bnx2x_sp_prod_update(sc);
return 0;
-static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
+static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp)
{
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
{
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
+ if (rte_atomic32_read(&sc->scan_fp) == 1) {
if (bnx2x_has_rx_work(fp)) {
more_rx = bnx2x_rxeof(sc, fp);
}
if (more_rx) {
/* still more work to do */
if (bnx2x_has_rx_work(fp)) {
more_rx = bnx2x_rxeof(sc, fp);
}
if (more_rx) {
/* still more work to do */
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
* then calls a separate routine to handle the various
* interrupt causes: link, RX, and TX.
*/
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
+int bnx2x_intr_legacy(struct bnx2x_softc *sc)
{
struct bnx2x_fastpath *fp;
uint32_t status, mask;
{
struct bnx2x_fastpath *fp;
uint32_t status, mask;
/* acknowledge and disable further fastpath interrupts */
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
0, IGU_INT_DISABLE, 0);
/* acknowledge and disable further fastpath interrupts */
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
0, IGU_INT_DISABLE, 0);
- bnx2x_handle_fp_tq(fp, scan_fp);
+ bnx2x_handle_fp_tq(fp);
#define PERIODIC_STOP 0
#define PERIODIC_GO 1
volatile unsigned long periodic_flags;
#define PERIODIC_STOP 0
#define PERIODIC_GO 1
volatile unsigned long periodic_flags;
+ rte_atomic32_t scan_fp;
struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
struct bnx2x_fastpath fp[MAX_RSS_CHAINS];
struct bnx2x_sp_objs sp_objs[MAX_RSS_CHAINS];
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
void bnx2x_print_device_info(struct bnx2x_softc *sc);
uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp);
void bnx2x_print_adapter_info(struct bnx2x_softc *sc);
void bnx2x_print_device_info(struct bnx2x_softc *sc);
-int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp);
+int bnx2x_intr_legacy(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- bnx2x_intr_legacy(sc, 0);
if (sc->periodic_flags & PERIODIC_GO)
bnx2x_periodic_callout(sc);
if (sc->periodic_flags & PERIODIC_GO)
bnx2x_periodic_callout(sc);
cnt *= 20;
ECORE_MSG(sc, "waiting for state to become %d", state);
cnt *= 20;
ECORE_MSG(sc, "waiting for state to become %d", state);
+ /* being over protective to remind bnx2x_intr_legacy() to
+ * process RAMROD
+ */
+ rte_atomic32_set(&sc->scan_fp, 1);
ECORE_MIGHT_SLEEP();
while (cnt--) {
ECORE_MIGHT_SLEEP();
while (cnt--) {
- bnx2x_intr_legacy(sc, 1);
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
+ rte_atomic32_set(&sc->scan_fp, 0);
return ECORE_SUCCESS;
}
ECORE_WAIT(sc, delay_us);
return ECORE_SUCCESS;
}
ECORE_WAIT(sc, delay_us);
+ if (sc->panic) {
+ rte_atomic32_set(&sc->scan_fp, 0);
}
/* timeout! */
PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
}
/* timeout! */
PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
+ rte_atomic32_set(&sc->scan_fp, 0);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif