When rte_pktmbuf_attach_extbuf() is used, the driver should not decrease
the reference count in its callback function hn_rx_buf_free_cb, because
the reference count is already decreased by rte_pktmbuf. Doing it twice
may result in underflow and driver may never send an ack packet over
vmbus to host.
Also declares rxbuf_outstanding as atomic, because this value is shared
among all receive queues.
Fixes: 4e9c73e96e83 ("net/netvsc: add Hyper-V network device")
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
 		/* Silently drop received packets while waiting for response */
 		if (hdr->type == NVS_TYPE_RNDIS) {
 			hn_nvs_ack_rxbuf(chan, xactid);
-			--hv->rxbuf_outstanding;
-/*
- * Ack the consumed RXBUF associated w/ this channel packet,
- * so that this RXBUF can be recycled by the hypervisor.
- */
-static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
+static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
 {
-	struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
+	struct hn_rx_bufinfo *rxb = opaque;
 	struct hn_data *hv = rxb->hv;
 
-	if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
-		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
-		--hv->rxbuf_outstanding;
-	}
-}
-
-static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
-{
-	hn_rx_buf_release(opaque);
+	rte_atomic32_dec(&hv->rxbuf_outstanding);
+	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 }
 
 static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
 	 * some space available in receive area for later packets.
 	 */
 	if (dlen >= HN_RXCOPY_THRESHOLD &&
-	    hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+	    (uint32_t)rte_atomic32_read(&hv->rxbuf_outstanding) <
+			hv->rxbuf_section_cnt / 2) {
 		struct rte_mbuf_ext_shared_info *shinfo;
 		const void *rxbuf;
 		rte_iova_t iova;
 
 		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
 		shinfo = &rxb->shinfo;
-		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
-			++hv->rxbuf_outstanding;
+		/* shinfo is already set to 1 by the caller */
+		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
+			rte_atomic32_inc(&hv->rxbuf_outstanding);
 
 		rte_pktmbuf_attach_extbuf(m, data, iova,
 					  dlen + headroom, shinfo);
 	}
 
 	/* Send ACK now if external mbuf not used */
-	hn_rx_buf_release(rxb);
+	if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
+		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
 	struct hn_rx_bufinfo *rxbuf_info;
 	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
-	volatile uint32_t rxbuf_outstanding;
+	rte_atomic32_t	rxbuf_outstanding;
 	uint16_t	max_queues;	/* Max available queues */
 	uint16_t	num_queues;
 	uint64_t	rss_offloads;