net/vmxnet3: fix Rx deadlock
author		Stefan Puiu <stefan.puiu@gmail.com>	Mon, 19 Dec 2016 09:40:53 +0000 (11:40 +0200)
committer	Ferruh Yigit <ferruh.yigit@intel.com>	Tue, 17 Jan 2017 18:40:51 +0000 (19:40 +0100)
Our use case is an app that needs to keep mbufs around for a while.
We've seen cases where vmxnet3_post_rx_bufs(), called from
vmxnet3_recv_pkts(), fails to add any mbufs to any Rx descriptors
(it returns -err). Since the virtual hardware then has no mbufs it
can use, no packets will be received after this point; the driver
never refills the ring, so it stays stuck in this state. I call this
a deadlock for lack of a better term: the virtual HW waits for free
mbufs, while the app waits for the hardware to notify it of data (by
flipping the generation bit on the used Rx descriptors). Note that
the app cannot recover from this.

This fix is a rework of this patch by Marco Lee:
http://dpdk.org/dev/patchwork/patch/6575/. I had to forward-port it,
address review comments and also revert the allocation failure
handling to the first version of the patch
(http://dpdk.org/ml/archives/dev/2015-July/022079.html), since that's
the only approach that seems to work, and is what other drivers do (I
checked ixgbe and em): allocate the replacement mbuf first, and only
consume the received descriptor if the allocation succeeds. Reusing
the mbuf that is being passed up to the application doesn't make
sense, and it was causing odd issues in our app. Also, reusing rxm
without checking whether it is NULL could crash the code.
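
For contrast, the allocate-before-consume pattern the fix adopts is
roughly the following (condensed from the diff below):

    while (rcd->gen == rxq->comp_ring.gen) {
            struct rte_mbuf *newm = rte_mbuf_raw_alloc(rxq->mp);

            if (unlikely(newm == NULL)) {
                    /* Out of mbufs: stop before consuming the
                     * descriptor, so the filled buffer stays on the
                     * ring and a later poll can retry once the app
                     * returns mbufs to the pool. */
                    rxq->stats.rx_buf_alloc_failure++;
                    break;
            }

            /* ... hand the filled mbuf up to the application ... */

            /* Re-arm the descriptor with the fresh mbuf; unlike
             * vmxnet3_post_rx_bufs(), this cannot fail. */
            vmxnet3_renew_desc(rxq, ring_idx, newm);
    }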

Fixes: 14680e3747d5 ("vmxnet3: improve Rx performance")

Signed-off-by: Stefan Puiu <stefan.puiu@gmail.com>
Acked-by: Yong Wang <yongwang@vmware.com>
drivers/net/vmxnet3/vmxnet3_rxtx.c

index 3651369..b246884 100644
@@ -574,6 +574,32 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }
 
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+                  struct rte_mbuf *mbuf)
+{
+       uint32_t val = 0;
+       struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+       struct Vmxnet3_RxDesc *rxd =
+               (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+       vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+       if (ring_id == 0)
+               val = VMXNET3_RXD_BTYPE_HEAD;
+       else
+               val = VMXNET3_RXD_BTYPE_BODY;
+
+       buf_info->m = mbuf;
+       buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+       buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+
+       rxd->addr = buf_info->bufPA;
+       rxd->btype = val;
+       rxd->len = buf_info->len;
+       rxd->gen = ring->gen;
+
+       vmxnet3_cmd_ring_adv_next2fill(ring);
+}
 /*
  *  Allocates mbufs and clusters. Post rx descriptors with buffer details
  *  so that device can receive packets in those buffers.
@@ -713,9 +739,18 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        }
 
        while (rcd->gen == rxq->comp_ring.gen) {
+               struct rte_mbuf *newm;
+
                if (nb_rx >= nb_pkts)
                        break;
 
+               newm = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(newm == NULL)) {
+                       PMD_RX_LOG(ERR, "Error allocating mbuf");
+                       rxq->stats.rx_buf_alloc_failure++;
+                       break;
+               }
+
                idx = rcd->rxdIdx;
                ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
                rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
@@ -815,8 +850,8 @@ rcd_done:
                VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
                                          rxq->cmd_ring[ring_idx].size);
 
-               /* It's time to allocate some new buf and renew descriptors */
-               vmxnet3_post_rx_bufs(rxq, ring_idx);
+               /* It's time to renew descriptors */
+               vmxnet3_renew_desc(rxq, ring_idx, newm);
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);