static uint32_t hn_chim_alloc(struct hn_data *hv)
{
uint32_t index = NVS_CHIM_IDX_INVALID;
- uint64_t slab;
+ uint64_t slab = 0;
rte_spinlock_lock(&hv->chim_lock);
- if (rte_bitmap_scan(hv->chim_bmap, &index, &slab))
+ if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
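+ /*
+ * rte_bitmap_scan() returns the start index of the 64-bit slab
+ * containing a set bit plus the slab itself; add the offset of
+ * the set bit within the slab to get the free chimney index.
+ */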
+ index += rte_bsf64(slab);
rte_bitmap_clear(hv->chim_bmap, index);
+ }
rte_spinlock_unlock(&hv->chim_lock);
return index;
}
-/*
- * Ack the consumed RXBUF associated w/ this channel packet,
- * so that this RXBUF can be recycled by the hypervisor.
- */
-static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
-{
- struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
- struct hn_data *hv = rxb->hv;
-
- if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
- hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
- --hv->rxbuf_outstanding;
- }
-}
-
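+/*
+ * Ack the consumed RXBUF associated with this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */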
static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
- hn_rx_buf_release(opaque);
+ struct hn_rx_bufinfo *rxb = opaque;
+ struct hn_rx_queue *rxq = rxb->rxq;
+
+ rte_atomic32_dec(&rxq->rxbuf_outstanding);
+ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
}
-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
const struct vmbus_chanpkt_rxbuf *pkt)
{
struct hn_rx_bufinfo *rxb;
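+ /*
+ * The transaction id is the index of a section within this
+ * channel's receive buffer, so it is looked up in the per-queue
+ * table rather than a device-global one.
+ */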
- rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+ rxb = rxq->rxbuf_info + pkt->hdr.xactid;
rxb->chan = rxq->chan;
rxb->xactid = pkt->hdr.xactid;
- rxb->hv = rxq->hv;
+ rxb->rxq = rxq;
rxb->shinfo.free_cb = hn_rx_buf_free_cb;
rxb->shinfo.fcb_opaque = rxb;
{
struct hn_data *hv = rxq->hv;
struct rte_mbuf *m;
+ bool use_extbuf = false;
m = rte_pktmbuf_alloc(rxq->mb_pool);
if (unlikely(!m)) {
* some space available in receive area for later packets.
*/
if (dlen >= HN_RXCOPY_THRESHOLD &&
- hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+ (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
+ hv->rxbuf_section_cnt / 2) {
struct rte_mbuf_ext_shared_info *shinfo;
const void *rxbuf;
rte_iova_t iova;
iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
shinfo = &rxb->shinfo;
- if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
- ++hv->rxbuf_outstanding;
+ /* shinfo is already set to 1 by the caller */
+ if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
+ rte_atomic32_inc(&rxq->rxbuf_outstanding);
rte_pktmbuf_attach_extbuf(m, data, iova,
dlen + headroom, shinfo);
m->data_off = headroom;
+ use_extbuf = true;
} else {
/* Mbuf's in pool must be large enough to hold small packets */
if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
if (!hv->vlan_strip && rte_vlan_insert(&m)) {
PMD_DRV_LOG(DEBUG, "vlan insert failed");
++rxq->stats.errors;
+ if (use_extbuf)
+ rte_pktmbuf_detach_extbuf(m);
rte_pktmbuf_free(m);
return;
}
if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
++rxq->stats.ring_full;
PMD_RX_LOG(DEBUG, "rx ring full");
+ if (use_extbuf)
+ rte_pktmbuf_detach_extbuf(m);
rte_pktmbuf_free(m);
}
}
}
/* Send ACK now if external mbuf not used */
- hn_rx_buf_release(rxb);
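+ /*
+ * shinfo starts at one reference; if an mbuf still holds an
+ * extbuf reference after the decrement, the ACK is deferred to
+ * hn_rx_buf_free_cb() when the last mbuf is freed.
+ */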
+ if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
+ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
}
/*
return NULL;
}
+ /* setup rxbuf_info for non-primary queue */
+ if (queue_id) {
+ rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+ hv->rxbuf_section_cnt,
+ sizeof(*rxq->rxbuf_info),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!rxq->rxbuf_info) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate rxbuf info for queue %d\n",
+ queue_id);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ return NULL;
+ }
+ }
+
return rxq;
}
fail:
rte_ring_free(rxq->rx_ring);
+ rte_free(rxq->rxbuf_info);
rte_free(rxq->event_buf);
rte_free(rxq);
return error;
if (keep_primary && rxq == rxq->hv->primary)
return;
+ rte_free(rxq->rxbuf_info);
rte_free(rxq->event_buf);
rte_free(rxq);
}
hn_rndis_dump(txd->rndis_pkt);
/* pass IOVA of rndis header in first segment */
- addr = rte_malloc_virt2iova(txd->rndis_pkt);
+ addr = rte_malloc_virt2iova(txq->tx_rndis);
if (unlikely(addr == RTE_BAD_IOVA)) {
PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
return -EINVAL;
}
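+ /*
+ * rte_malloc_virt2iova() is only valid for addresses returned
+ * by rte_malloc(); rndis_pkt points into the middle of the
+ * tx_rndis region, so add its offset to the region's IOVA.
+ */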
+ addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
sg[0].page = addr / PAGE_SIZE;
sg[0].ofs = addr & PAGE_MASK;