resp.nvs_sect[0].slotcnt);
hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
- hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
- sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
- if (!hv->rxbuf_info) {
+ /*
+ * The primary queue's rxbuf_info is not allocated at creation time.
+ * Allocate it now that the section count (slotcnt) is known.
+ */
+ hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+ hv->rxbuf_section_cnt,
+ sizeof(*hv->primary->rxbuf_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!hv->primary->rxbuf_info) {
PMD_DRV_LOG(ERR,
"could not allocate rxbuf info");
return -ENOMEM;
}
- rte_free(hv->rxbuf_info);
/*
* Linger long enough for NVS to disconnect RXBUF.
*/
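+ /*
+ * Free callback for mbufs attached to the host receive buffer:
+ * drop the owning queue's outstanding count and ack the section
+ * back to the host so it can be reused.
+ */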
static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
struct hn_rx_bufinfo *rxb = opaque;
- struct hn_data *hv = rxb->hv;
+ struct hn_rx_queue *rxq = rxb->rxq;
- rte_atomic32_dec(&hv->rxbuf_outstanding);
+ rte_atomic32_dec(&rxq->rxbuf_outstanding);
hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
}
-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
const struct vmbus_chanpkt_rxbuf *pkt)
{
struct hn_rx_bufinfo *rxb;
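+ /* the host's transaction id doubles as the Rx section index */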
- rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+ rxb = rxq->rxbuf_info + pkt->hdr.xactid;
rxb->chan = rxq->chan;
rxb->xactid = pkt->hdr.xactid;
- rxb->hv = rxq->hv;
+ rxb->rxq = rxq;
rxb->shinfo.free_cb = hn_rx_buf_free_cb;
rxb->shinfo.fcb_opaque = rxb;
* some space available in receive area for later packets.
*/
if (dlen >= HN_RXCOPY_THRESHOLD &&
- (uint32_t)rte_atomic32_read(&hv->rxbuf_outstanding) <
+ (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
hv->rxbuf_section_cnt / 2) {
struct rte_mbuf_ext_shared_info *shinfo;
const void *rxbuf;
/* shinfo is already set to 1 by the caller */
if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
- rte_atomic32_inc(&hv->rxbuf_outstanding);
+ rte_atomic32_inc(&rxq->rxbuf_outstanding);
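+ /* give the mbuf a zero-copy reference into the host receive area */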
rte_pktmbuf_attach_extbuf(m, data, iova,
dlen + headroom, shinfo);
return NULL;
}
+ /*
+ * Set up rxbuf_info for non-primary queues. The primary queue's
+ * rxbuf_info is allocated later, once the NVS response reports
+ * the section count.
+ */
+ if (queue_id) {
+ rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+ hv->rxbuf_section_cnt,
+ sizeof(*rxq->rxbuf_info),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!rxq->rxbuf_info) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate rxbuf info for queue %d\n",
+ queue_id);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ return NULL;
+ }
+ }
+
return rxq;
}
fail:
rte_ring_free(rxq->rx_ring);
+ rte_free(rxq->rxbuf_info);
rte_free(rxq->event_buf);
rte_free(rxq);
return error;
if (keep_primary && rxq == rxq->hv->primary)
return;
+ rte_free(rxq->rxbuf_info);
rte_free(rxq->event_buf);
rte_free(rxq);
}
struct hn_stats stats;
void *event_buf;
+ struct hn_rx_bufinfo *rxbuf_info; /* per-queue Rx buffer info */
+ rte_atomic32_t rxbuf_outstanding; /* sections attached to mbufs */
};
/* multi-packet data from host */
struct hn_rx_bufinfo {
struct vmbus_channel *chan;
- struct hn_data *hv;
+ struct hn_rx_queue *rxq; /* owning Rx queue */
uint64_t xactid;
struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
uint32_t link_speed;
struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
- struct hn_rx_bufinfo *rxbuf_info;
uint32_t rxbuf_section_cnt; /* # of Rx sections */
- rte_atomic32_t rxbuf_outstanding;
uint16_t max_queues; /* Max available queues */
uint16_t num_queues;
uint64_t rss_offloads;