diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 6a52885d58..c8c4ee10c8 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -206,11 +206,13 @@ hn_chim_uninit(struct rte_eth_dev *dev)
 static uint32_t hn_chim_alloc(struct hn_data *hv)
 {
 	uint32_t index = NVS_CHIM_IDX_INVALID;
-	uint64_t slab;
+	uint64_t slab = 0;
 
 	rte_spinlock_lock(&hv->chim_lock);
-	if (rte_bitmap_scan(hv->chim_bmap, &index, &slab))
+	if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
+		index += rte_bsf64(slab);
 		rte_bitmap_clear(hv->chim_bmap, index);
+	}
 	rte_spinlock_unlock(&hv->chim_lock);
 
 	return index;
@@ -519,35 +521,24 @@ next:
 	return 0;
 }
 
-/*
- * Ack the consumed RXBUF associated w/ this channel packet,
- * so that this RXBUF can be recycled by the hypervisor.
- */
-static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
-{
-	struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
-	struct hn_data *hv = rxb->hv;
-
-	if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
-		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
-		--hv->rxbuf_outstanding;
-	}
-}
-
 static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
 {
-	hn_rx_buf_release(opaque);
+	struct hn_rx_bufinfo *rxb = opaque;
+	struct hn_rx_queue *rxq = rxb->rxq;
+
+	rte_atomic32_dec(&rxq->rxbuf_outstanding);
+	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 }
 
-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
 					    const struct vmbus_chanpkt_rxbuf *pkt)
 {
 	struct hn_rx_bufinfo *rxb;
 
-	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+	rxb = rxq->rxbuf_info + pkt->hdr.xactid;
 	rxb->chan = rxq->chan;
 	rxb->xactid = pkt->hdr.xactid;
-	rxb->hv = rxq->hv;
+	rxb->rxq = rxq;
 
 	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
 	rxb->shinfo.fcb_opaque = rxb;
@@ -561,6 +552,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 {
 	struct hn_data *hv = rxq->hv;
 	struct rte_mbuf *m;
+	bool use_extbuf = false;
 
 	m = rte_pktmbuf_alloc(rxq->mb_pool);
 	if (unlikely(!m)) {
@@ -576,7 +568,8 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 	 * some space available in receive area for later packets.
 	 */
 	if (dlen >= HN_RXCOPY_THRESHOLD &&
-	    hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
+	    hv->rxbuf_section_cnt / 2) {
 		struct rte_mbuf_ext_shared_info *shinfo;
 		const void *rxbuf;
 		rte_iova_t iova;
@@ -590,12 +583,14 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
 		shinfo = &rxb->shinfo;
 
-		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
-			++hv->rxbuf_outstanding;
+		/* shinfo is already set to 1 by the caller */
+		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
+			rte_atomic32_inc(&rxq->rxbuf_outstanding);
 
 		rte_pktmbuf_attach_extbuf(m, data, iova,
 					  dlen + headroom, shinfo);
 		m->data_off = headroom;
+		use_extbuf = true;
 	} else {
 		/* Mbuf's in pool must be large enough to hold small packets */
 		if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
@@ -623,6 +618,8 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 	if (!hv->vlan_strip && rte_vlan_insert(&m)) {
 		PMD_DRV_LOG(DEBUG, "vlan insert failed");
 		++rxq->stats.errors;
+		if (use_extbuf)
+			rte_pktmbuf_detach_extbuf(m);
 		rte_pktmbuf_free(m);
 		return;
 	}
@@ -657,6 +654,8 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 	if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
 		++rxq->stats.ring_full;
 		PMD_RX_LOG(DEBUG, "rx ring full");
+		if (use_extbuf)
+			rte_pktmbuf_detach_extbuf(m);
 		rte_pktmbuf_free(m);
 	}
 }
@@ -832,7 +831,8 @@ hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
 	}
 
 	/* Send ACK now if external mbuf not used */
-	hn_rx_buf_release(rxb);
+	if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
+		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 }
 
 /*
@@ -888,6 +888,23 @@ struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
 		return NULL;
 	}
 
+	/* setup rxbuf_info for non-primary queue */
+	if (queue_id) {
+		rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+					     hv->rxbuf_section_cnt,
+					     sizeof(*rxq->rxbuf_info),
+					     RTE_CACHE_LINE_SIZE);
+
+		if (!rxq->rxbuf_info) {
+			PMD_DRV_LOG(ERR,
+				"Could not allocate rxbuf info for queue %d\n",
+				queue_id);
+			rte_free(rxq->event_buf);
+			rte_free(rxq);
+			return NULL;
+		}
+	}
+
 	return rxq;
 }
 
@@ -953,6 +970,7 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 fail:
 	rte_ring_free(rxq->rx_ring);
+	rte_free(rxq->rxbuf_info);
 	rte_free(rxq->event_buf);
 	rte_free(rxq);
 	return error;
@@ -975,6 +993,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
 	if (keep_primary && rxq == rxq->hv->primary)
 		return;
 
+	rte_free(rxq->rxbuf_info);
 	rte_free(rxq->event_buf);
 	rte_free(rxq);
 }
@@ -1423,11 +1442,12 @@ static int hn_xmit_sg(struct hn_tx_queue *txq,
 	hn_rndis_dump(txd->rndis_pkt);
 
 	/* pass IOVA of rndis header in first segment */
-	addr = rte_malloc_virt2iova(txd->rndis_pkt);
+	addr = rte_malloc_virt2iova(txq->tx_rndis);
 	if (unlikely(addr == RTE_BAD_IOVA)) {
 		PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
 		return -EINVAL;
 	}
+	addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
 	sg[0].page = addr / PAGE_SIZE;
 	sg[0].ofs = addr & PAGE_MASK;
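
The first hunk matters for any caller of rte_bitmap_scan(): the scan does not hand back the position of the set bit itself. It reports the base bit index of the 64-bit slab it stopped in, together with the slab's contents, and the caller is expected to add the offset of the first set bit, which is exactly what the added `index += rte_bsf64(slab);` line does. Below is a minimal standalone sketch of the corrected pattern; the helper name and the UINT32_MAX sentinel (standing in for NVS_CHIM_IDX_INVALID) are illustrative, not taken from the driver.

#include <stdint.h>
#include <rte_bitmap.h>
#include <rte_common.h>

/* Pop the next set bit out of a bitmap and return its index,
 * or UINT32_MAX if no bit is set. */
static uint32_t
bitmap_pop_index(struct rte_bitmap *bmap)
{
	uint32_t index = UINT32_MAX;
	uint64_t slab = 0;

	if (rte_bitmap_scan(bmap, &index, &slab)) {
		/* index points at bit 0 of the slab, not at the set bit */
		index += rte_bsf64(slab);
		rte_bitmap_clear(bmap, index);
	}
	return index;
}

The `slab = 0` initialization mirrors the hunk; it keeps the variable defined on the not-found path (presumably also quieting maybe-uninitialized warnings), and rte_bsf64() is only reached when the scan succeeded, so the slab is guaranteed non-zero there.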
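The Rx hunks rebuild the buffer accounting around the mbuf library's external-buffer reference count: per the added `/* shinfo is already set to 1 by the caller */` comment, the shared info starts at 1 (the driver's own reference), each mbuf attached zero-copy to the host receive area takes another, and whichever side drops the count to zero recycles the buffer, either via the free callback or via the explicit `rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0` check in hn_nvs_handle_rxbuf(). A simplified sketch of that protocol, with an illustrative callback standing in for hn_nvs_ack_rxbuf() and all helper names mine:

#include <stdio.h>
#include <rte_mbuf.h>

/* Stand-in for hn_nvs_ack_rxbuf(): must run exactly once, when the
 * last reference to the host receive region is dropped. */
static void
ack_host_buffer(void *buf __rte_unused, void *opaque __rte_unused)
{
	printf("host rx buffer can be recycled\n");
}

/* Once per channel packet, before any mbuf is attached. */
static void
init_rx_region(struct rte_mbuf_ext_shared_info *shinfo)
{
	shinfo->free_cb = ack_host_buffer;
	shinfo->fcb_opaque = NULL;
	rte_mbuf_ext_refcnt_set(shinfo, 1);	/* driver's own reference */
}

/* Zero-copy delivery: each attached mbuf holds its own reference,
 * released by the mbuf library when the mbuf is freed or detached. */
static void
attach_rx_region(struct rte_mbuf *m, void *data, rte_iova_t iova,
		 uint16_t len, struct rte_mbuf_ext_shared_info *shinfo)
{
	rte_mbuf_ext_refcnt_update(shinfo, 1);
	rte_pktmbuf_attach_extbuf(m, data, iova, len, shinfo);
}

/* After the receive loop: drop the driver's reference and ack
 * immediately if no attached mbuf is still outstanding. */
static void
done_rx_region(struct rte_mbuf_ext_shared_info *shinfo)
{
	if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0)
		ack_host_buffer(NULL, NULL);
}

The added rte_pktmbuf_detach_extbuf() calls on the vlan-insert and ring-full error paths drop that per-mbuf reference explicitly before the mbuf goes back to its pool, so the accounting stays balanced when a packet is discarded.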
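The last hunk changes where the RNDIS header's IOVA comes from: rather than handing the per-descriptor interior pointer txd->rndis_pkt to rte_malloc_virt2iova(), it resolves the base of the txq->tx_rndis allocation and adds the header's byte offset within it. A hedged sketch of the same computation as a standalone helper (the function name is illustrative, and it assumes the region is IOVA-contiguous, as the driver's single allocation is treated here):

#include <rte_malloc.h>
#include <rte_memory.h>

/* IOVA of a pointer that lies inside a larger rte_malloc() region:
 * resolve the region's base address, then offset linearly. */
static rte_iova_t
iova_of_subregion(const void *base, const void *ptr)
{
	rte_iova_t base_iova = rte_malloc_virt2iova(base);

	if (base_iova == RTE_BAD_IOVA)
		return RTE_BAD_IOVA;

	return base_iova + ((const char *)ptr - (const char *)base);
}

For the driver this would read as iova_of_subregion(txq->tx_rndis, txd->rndis_pkt), matching the added `addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);` line in the hunk.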