/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_bitmap.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_spinlock.h>
#define HN_NVS_SEND_MSG_SIZE \
	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
#define HN_TXD_CACHE_SIZE	32 /* per-cpu tx descriptor pool cache */
#define HN_TXCOPY_THRESHOLD	512
#define HN_RXCOPY_THRESHOLD	256
#define HN_RXQ_EVENT_DEFAULT	2048
#define HN_RXINFO_VLAN		0x0001
#define HN_RXINFO_CSUM		0x0002
#define HN_RXINFO_HASHINF	0x0004
#define HN_RXINFO_HASHVAL	0x0008
#define HN_RXINFO_ALL		\
#define HN_NDIS_VLAN_INFO_INVALID	0xffffffff
#define HN_NDIS_RXCSUM_INFO_INVALID	0
#define HN_NDIS_HASH_INFO_INVALID	0
 * Per-transmit bookkeeping.
 * A slot in the transmit ring (chim_index) is reserved for each transmit.
 * There are two types of transmit:
 *   - buffered transmit where the chimney buffer is used and the RNDIS
 *     header is placed in that buffer. mbuf == NULL for this case.
 *   - direct transmit where the RNDIS header is in rndis_pkt and the
 *     mbuf is freed after transmit.
 * Descriptors come from a per-port pool which is used
 * to limit the number of outstanding requests per device.
	struct rndis_packet_msg *rndis_pkt;
#define HN_RNDIS_PKT_LEN				\
	(sizeof(struct rndis_packet_msg) +		\
	 RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +	\
	 RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
#define HN_RNDIS_PKT_ALIGNED	RTE_ALIGN(HN_RNDIS_PKT_LEN, RTE_CACHE_LINE_SIZE)
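/*
 * HN_RNDIS_PKT_LEN is sized for the worst-case header that hn_encap() can
 * build: the base rndis_packet_msg plus the four per-packet-info blobs it
 * may append (hash value, VLAN, LSO2, transmit checksum). The aligned size
 * gives each descriptor's header slot in tx_rndis its own cache line(s).
 */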
/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
	RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
#define DEFAULT_TX_FREE_THRESH 32
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
	uint32_t s = m->pkt_len;
	const struct rte_ether_addr *ea;
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		/* sizes 65..1023: log2-based bins 2-5 (65-127, 128-255, 256-511, 512-1023) */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
			stats->size_bins[0]++;
			stats->size_bins[6]++;
			stats->size_bins[7]++;
	ea = rte_pktmbuf_mtod(m, const struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
	return pkt->pktinfooffset + pkt->pktinfolen;
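/*
 * On the wire, RNDIS dataoffset/pktinfooffset are measured relative to the
 * dataoffset field (i.e. they skip the type/len header), while this driver
 * builds the message using offsets from the start of the structure;
 * hn_rndis_pktmsg_offset() converts between the two views.
 */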
static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
	return ofs - offsetof(struct rndis_packet_msg, dataoffset);
static void hn_txd_init(struct rte_mempool *mp __rte_unused,
			void *opaque, void *obj, unsigned int idx)
	struct hn_tx_queue *txq = opaque;
	struct hn_txdesc *txd = obj;
	memset(txd, 0, sizeof(*txd));
	txd->queue_id = txq->queue_id;
	txd->chim_index = NVS_CHIM_IDX_INVALID;
	txd->rndis_pkt = (struct rndis_packet_msg *)((char *)txq->tx_rndis
		+ idx * HN_RNDIS_PKT_ALIGNED);
hn_chim_init(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;
	uint32_t i, chim_bmp_size;
	rte_spinlock_init(&hv->chim_lock);
	chim_bmp_size = rte_bitmap_get_memory_footprint(hv->chim_cnt);
	hv->chim_bmem = rte_zmalloc("hn_chim_bitmap", chim_bmp_size,
				    RTE_CACHE_LINE_SIZE);
	if (hv->chim_bmem == NULL) {
		PMD_INIT_LOG(ERR, "failed to allocate bitmap size %u",
	hv->chim_bmap = rte_bitmap_init(hv->chim_cnt,
					hv->chim_bmem, chim_bmp_size);
	if (hv->chim_bmap == NULL) {
		PMD_INIT_LOG(ERR, "failed to init chim bitmap");
	for (i = 0; i < hv->chim_cnt; i++)
		rte_bitmap_set(hv->chim_bmap, i);
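/*
 * Every chimney slot starts out marked free: a set bit means the slot is
 * available, hn_chim_alloc() claims a slot by clearing its bit, and
 * hn_chim_free() sets it again.
 */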
hn_chim_uninit(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;
	rte_bitmap_free(hv->chim_bmap);
	rte_free(hv->chim_bmem);
	hv->chim_bmem = NULL;
static uint32_t hn_chim_alloc(struct hn_data *hv)
	uint32_t index = NVS_CHIM_IDX_INVALID;
	rte_spinlock_lock(&hv->chim_lock);
	if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
		index += rte_bsf64(slab);
		rte_bitmap_clear(hv->chim_bmap, index);
	rte_spinlock_unlock(&hv->chim_lock);
static void hn_chim_free(struct hn_data *hv, uint32_t chim_idx)
	if (chim_idx >= hv->chim_cnt) {
		PMD_DRV_LOG(ERR, "Invalid chimney index %u", chim_idx);
		rte_spinlock_lock(&hv->chim_lock);
		rte_bitmap_set(hv->chim_bmap, chim_idx);
		rte_spinlock_unlock(&hv->chim_lock);
static void hn_reset_txagg(struct hn_tx_queue *txq)
	txq->agg_szleft = txq->agg_szmax;
	txq->agg_pktleft = txq->agg_pktmax;
	txq->agg_prevpkt = NULL;
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf)
	struct hn_data *hv = dev->data->dev_private;
	struct hn_tx_queue *txq;
	char name[RTE_MEMPOOL_NAMESIZE];
	uint32_t tx_free_thresh;
	PMD_INIT_FUNC_TRACE();
	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
	txq->chan = hv->channels[queue_idx];
	txq->port_id = dev->data->port_id;
	txq->queue_id = queue_idx;
	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh = RTE_MIN(nb_desc / 4,
					 DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh + 3 >= nb_desc) {
			     "tx_free_thresh must be less than the number of TX entries minus 3(%u)."
			     " (tx_free_thresh=%u port=%u queue=%u)\n",
			     tx_free_thresh, dev->data->port_id, queue_idx);
	txq->free_thresh = tx_free_thresh;
	snprintf(name, sizeof(name),
		 "hn_txd_%u_%u", dev->data->port_id, queue_idx);
	PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
		     name, nb_desc, sizeof(struct hn_txdesc));
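	/*
	 * tx_rndis is one flat array with an HN_RNDIS_PKT_ALIGNED slot per
	 * descriptor; hn_txd_init() later points each descriptor's rndis_pkt
	 * at its slot so the RNDIS header can be built in place.
	 */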
	txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc,
				   HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE);
	if (txq->tx_rndis == NULL)
	txq->txdesc_pool = rte_mempool_create(name, nb_desc,
					      sizeof(struct hn_txdesc),
					      dev->device->numa_node, 0);
	if (txq->txdesc_pool == NULL) {
			     "mempool %s create failed: %d", name, rte_errno);
	txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
	txq->agg_pktmax = hv->rndis_agg_pkts;
	txq->agg_align = hv->rndis_agg_align;
	err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
	dev->data->tx_queues[queue_idx] = txq;
	if (txq->txdesc_pool)
		rte_mempool_free(txq->txdesc_pool);
	rte_free(txq->tx_rndis);
hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_txq_info *qinfo)
	struct hn_tx_queue *txq = dev->data->tx_queues[queue_id];
	qinfo->nb_desc = txq->txdesc_pool->size;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
static struct hn_txdesc *hn_txd_get(struct hn_tx_queue *txq)
	struct hn_txdesc *txd;
	if (rte_mempool_get(txq->txdesc_pool, (void **)&txd)) {
		++txq->stats.ring_full;
		PMD_TX_LOG(DEBUG, "tx pool exhausted!");
static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
	rte_mempool_put(txq->txdesc_pool, txd);
hn_dev_tx_queue_release(void *arg)
	struct hn_tx_queue *txq = arg;
	PMD_INIT_FUNC_TRACE();
	if (txq->txdesc_pool)
		rte_mempool_free(txq->txdesc_pool);
	rte_free(txq->tx_rndis);
 * Check the status of a Tx descriptor in the queue.
 *  - -EINVAL - offset outside of tx descriptor pool.
 *  - RTE_ETH_TX_DESC_FULL - descriptor is not acknowledged by host.
 *  - RTE_ETH_TX_DESC_DONE - descriptor is available.
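 *
 * An application reaches this through the ethdev API; an illustrative
 * polling sketch (port_id, queue_id and off are placeholders, not part of
 * this driver):
 *
 *	uint16_t off = 0;
 *
 *	while (rte_eth_tx_descriptor_status(port_id, queue_id, off) ==
 *	       RTE_ETH_TX_DESC_FULL)
 *		rte_pause();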
int hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
	const struct hn_tx_queue *txq = arg;
	hn_process_events(txq->hv, txq->queue_id, 0);
	if (offset >= rte_mempool_avail_count(txq->txdesc_pool))
	if (offset < rte_mempool_in_use_count(txq->txdesc_pool))
		return RTE_ETH_TX_DESC_FULL;
	return RTE_ETH_TX_DESC_DONE;
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
		      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
	struct hn_data *hv = dev->data->dev_private;
	struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
	struct hn_tx_queue *txq;
	/* Control packets are sent with xactid == 0 */
	txq = dev->data->tx_queues[queue_id];
	if (likely(ack->status == NVS_STATUS_OK)) {
		PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
			   txq->port_id, txq->queue_id, txd->chim_index,
			   txd->packets, txd->data_size);
		txq->stats.bytes += txd->data_size;
		txq->stats.packets += txd->packets;
		PMD_DRV_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
			    txq->port_id, txq->queue_id, txd->chim_index, ack->status);
	if (txd->chim_index != NVS_CHIM_IDX_INVALID) {
		hn_chim_free(hv, txd->chim_index);
		txd->chim_index = NVS_CHIM_IDX_INVALID;
	rte_pktmbuf_free(txd->m);
	hn_txd_put(txq, txd);
/* Handle transmit completion events */
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
		   const struct vmbus_chanpkt_hdr *pkt,
	const struct hn_nvs_hdr *hdr = data;
	case NVS_TYPE_RNDIS_ACK:
		hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
		PMD_DRV_LOG(NOTICE, "unexpected send completion type %u",
/* Parse per-packet info (metadata) */
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
		struct hn_rxinfo *info)
	const struct rndis_pktinfo *pi = info_data;
	while (info_dlen != 0) {
		if (unlikely(info_dlen < sizeof(*pi)))
		if (unlikely(info_dlen < pi->size))
		info_dlen -= pi->size;
		if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
		if (unlikely(pi->size < pi->offset))
		dlen = pi->size - pi->offset;
		case NDIS_PKTINFO_TYPE_VLAN:
			if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
			info->vlan_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_VLAN;
		case NDIS_PKTINFO_TYPE_CSUM:
			if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
			info->csum_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_CSUM;
		case NDIS_PKTINFO_TYPE_HASHVAL:
			if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
			info->hash_value = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHVAL;
		case NDIS_PKTINFO_TYPE_HASHINF:
			if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
			info->hash_info = *((const uint32_t *)data);
			mask |= HN_RXINFO_HASHINF;
		if (mask == HN_RXINFO_ALL)
			break; /* All found; done */
		pi = (const struct rndis_pktinfo *)
			((const uint8_t *)pi + pi->size);
	 * - If there is no hash value, invalidate the hash info.
	if (!(mask & HN_RXINFO_HASHVAL))
		info->hash_info = HN_NDIS_HASH_INFO_INVALID;
static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
	struct hn_rx_bufinfo *rxb = opaque;
	struct hn_rx_queue *rxq = rxb->rxq;
	rte_atomic32_dec(&rxq->rxbuf_outstanding);
	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
					    const struct vmbus_chanpkt_rxbuf *pkt)
	struct hn_rx_bufinfo *rxb;
	rxb = rxq->rxbuf_info + pkt->hdr.xactid;
	rxb->chan = rxq->chan;
	rxb->xactid = pkt->hdr.xactid;
	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
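/*
 * Refcount protocol for the host receive area: the bufinfo starts at one;
 * each packet that attaches the area as an external mbuf bumps it (see
 * hn_rxpkt below), and the section is ACKed back to the host only when the
 * count drops to zero again, either from hn_rx_buf_free_cb or from the
 * final decrement at the end of hn_nvs_handle_rxbuf.
 */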
static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
		     uint8_t *data, unsigned int headroom, unsigned int dlen,
		     const struct hn_rxinfo *info)
	struct hn_data *hv = rxq->hv;
	bool use_extbuf = false;
	m = rte_pktmbuf_alloc(rxq->mb_pool);
		struct rte_eth_dev *dev =
			&rte_eth_devices[rxq->port_id];
		dev->data->rx_mbuf_alloc_failed++;
	 * For large packets, avoid the copy if possible, but keep some
	 * space available in the receive area for later packets.
	if (dlen >= HN_RXCOPY_THRESHOLD &&
	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
			hv->rxbuf_section_cnt / 2) {
		struct rte_mbuf_ext_shared_info *shinfo;
		 * Build an external mbuf that points to the receive area.
		 * Use refcount to handle multiple packets in the same
		 * receive buffer section.
		rxbuf = hv->rxbuf_res->addr;
		iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
		shinfo = &rxb->shinfo;
		/* shinfo is already set to 1 by the caller */
		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
			rte_atomic32_inc(&rxq->rxbuf_outstanding);
		rte_pktmbuf_attach_extbuf(m, data, iova,
					  dlen + headroom, shinfo);
		m->data_off = headroom;
		/* Mbufs in the pool must be large enough to hold small packets */
		if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
			rte_pktmbuf_free_seg(m);
		rte_memcpy(rte_pktmbuf_mtod(m, void *),
			   data + headroom, dlen);
	m->port = rxq->port_id;
	m->packet_type = rte_net_get_ptype(m, NULL,
	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m->vlan_tci = info->vlan_info;
		m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
		/* NDIS always strips tag, put it back if necessary */
		if (!hv->vlan_strip && rte_vlan_insert(&m)) {
			PMD_DRV_LOG(DEBUG, "vlan insert failed");
				rte_pktmbuf_detach_extbuf(m);
	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
			m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
				       | NDIS_RXCSUM_INFO_TCPCS_OK))
			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
					    | NDIS_RXCSUM_INFO_UDPCS_FAILED))
			m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		m->ol_flags |= PKT_RX_RSS_HASH;
		m->hash.rss = info->hash_value;
		   "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
		   rxq->port_id, rxq->queue_id, rxb->xactid,
		   m->pkt_len, m->packet_type, m->ol_flags);
	++rxq->stats.packets;
	rxq->stats.bytes += m->pkt_len;
	hn_update_packet_stats(&rxq->stats, m);
	if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
		++rxq->stats.ring_full;
		PMD_RX_LOG(DEBUG, "rx ring full");
			rte_pktmbuf_detach_extbuf(m);
static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
			     struct hn_rx_bufinfo *rxb,
			     void *data, uint32_t dlen)
	unsigned int data_off, data_len, total_len;
	unsigned int pktinfo_off, pktinfo_len;
	const struct rndis_packet_msg *pkt = data;
	struct hn_rxinfo info = {
		.vlan_info = HN_NDIS_VLAN_INFO_INVALID,
		.csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
		.hash_info = HN_NDIS_HASH_INFO_INVALID,
	if (unlikely(dlen < sizeof(*pkt)))
	if (unlikely(dlen < pkt->len))
		goto error; /* truncated RNDIS from host */
	if (unlikely(pkt->len < pkt->datalen
		     + pkt->oobdatalen + pkt->pktinfolen))
	if (unlikely(pkt->datalen == 0))
	if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
	if (likely(pkt->pktinfooffset > 0) &&
	    unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
		     (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
	data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	data_len = pkt->datalen;
	pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
	pktinfo_len = pkt->pktinfolen;
	if (likely(pktinfo_len > 0)) {
		err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
	if (__builtin_add_overflow(data_off, data_len, &total_len) ||
	    total_len > pkt->len)
	if (unlikely(data_len < RTE_ETHER_HDR_LEN))
	hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
		 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
	const struct rndis_msghdr *hdr = buf;
	case RNDIS_PACKET_MSG:
		if (dev->data->dev_started)
			hn_rndis_rx_data(rxq, rxb, buf, len);
	case RNDIS_INDICATE_STATUS_MSG:
		hn_rndis_link_status(dev, buf);
	case RNDIS_INITIALIZE_CMPLT:
	case RNDIS_QUERY_CMPLT:
	case RNDIS_SET_CMPLT:
		hn_rndis_receive_response(rxq->hv, buf, len);
			    "unexpected RNDIS message (type %#x len %u)",
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
		    struct hn_rx_queue *rxq,
		    const struct vmbus_chanpkt_hdr *hdr,
	const struct vmbus_chanpkt_rxbuf *pkt;
	const struct hn_nvs_hdr *nvs_hdr = buf;
	uint32_t rxbuf_sz = hv->rxbuf_res->len;
	char *rxbuf = hv->rxbuf_res->addr;
	unsigned int i, hlen, count;
	struct hn_rx_bufinfo *rxb;
	/* At minimum we need type header */
	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
		PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
	/* Make sure that this is an RNDIS message. */
	if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
		PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
	hlen = vmbus_chanpkt_getlen(hdr->hlen);
	if (unlikely(hlen < sizeof(*pkt))) {
		PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
	pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
	if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
		PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
	count = pkt->rxbuf_cnt;
	if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
		PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
	if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
		PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
	/* Setup receive buffer info to allow for callback */
	rxb = hn_rx_buf_init(rxq, pkt);
	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
	for (i = 0; i < count; ++i) {
		unsigned int ofs, len;
		ofs = pkt->rxbuf[i].ofs;
		len = pkt->rxbuf[i].len;
		if (unlikely(ofs + len > rxbuf_sz)) {
				   "%uth RNDIS msg overflow ofs %u, len %u",
		if (unlikely(len == 0)) {
			PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
		hn_rndis_receive(dev, rxq, rxb,
	/* Send ACK now if external mbuf not used */
	if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 * Called when NVS inband events are received.
 * Send up a two part message with port_id and the NVS message
 * to the pipe to the netvsc-vf-event control thread.
static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
				 const struct vmbus_chanpkt_hdr *pkt,
	const struct hn_nvs_hdr *hdr = data;
	case NVS_TYPE_TXTBL_NOTE:
		/* The transmit indirection table has locking problems
		 * in DPDK and is therefore not implemented
		PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
	case NVS_TYPE_VFASSOC_NOTE:
		hn_nvs_handle_vfassoc(dev, pkt, data);
			    "got notify, nvs type %u", hdr->type);
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      unsigned int socket_id)
	struct hn_rx_queue *rxq;
	rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	rxq->chan = hv->channels[queue_id];
	rte_spinlock_init(&rxq->ring_lock);
	rxq->port_id = hv->port_id;
	rxq->queue_id = queue_id;
	rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
	rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
					   RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->event_buf) {
	/* setup rxbuf_info for non-primary queue */
		rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
					     hv->rxbuf_section_cnt,
					     sizeof(*rxq->rxbuf_info),
					     RTE_CACHE_LINE_SIZE);
		if (!rxq->rxbuf_info) {
				    "Could not allocate rxbuf info for queue %d\n",
			rte_free(rxq->event_buf);
hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
		     struct rte_eth_rxq_info *qinfo)
	struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
	qinfo->mp = rxq->mb_pool;
	qinfo->nb_desc = rxq->rx_ring->size;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
		      uint16_t queue_idx, uint16_t nb_desc,
		      unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf,
		      struct rte_mempool *mp)
	struct hn_data *hv = dev->data->dev_private;
	char ring_name[RTE_RING_NAMESIZE];
	struct hn_rx_queue *rxq;
	PMD_INIT_FUNC_TRACE();
	if (queue_idx == 0) {
		rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
	count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
	if (nb_desc == 0 || nb_desc > count)
	 * Staging ring from receive event logic to rx_pkts.
	 * rx_pkts assumes the caller handles any multi-thread issues;
	 * the event logic has its own locking.
	snprintf(ring_name, sizeof(ring_name),
		 "hn_rx_%u_%u", dev->data->port_id, queue_idx);
	rxq->rx_ring = rte_ring_create(ring_name,
				       rte_align32pow2(nb_desc),
	error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
				     socket_id, rx_conf, mp);
	dev->data->rx_queues[queue_idx] = rxq;
	rte_ring_free(rxq->rx_ring);
	rte_free(rxq->rxbuf_info);
	rte_free(rxq->event_buf);
hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
	rte_ring_free(rxq->rx_ring);
	hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
	/* Keep primary queue to allow for control operations */
	if (keep_primary && rxq == rxq->hv->primary)
	rte_free(rxq->rxbuf_info);
	rte_free(rxq->event_buf);
hn_dev_rx_queue_release(void *arg)
	struct hn_rx_queue *rxq = arg;
	PMD_INIT_FUNC_TRACE();
	hn_rx_queue_free(rxq, true);
 * Get the number of used descriptors in an Rx queue.
 * For this device that means how many packets are pending in the ring.
hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
	struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
	return rte_ring_count(rxq->rx_ring);
 * Check the status of an Rx descriptor in the queue
 *  - -EINVAL - offset outside of ring
 *  - RTE_ETH_RX_DESC_AVAIL - no data available yet
 *  - RTE_ETH_RX_DESC_DONE - data is waiting in staging ring
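 *
 * Here DONE means the packet is already staged in rx_ring and the next
 * receive burst will return it; AVAIL means the slot may still be filled
 * by a later channel event.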
int hn_dev_rx_queue_status(void *arg, uint16_t offset)
	const struct hn_rx_queue *rxq = arg;
	hn_process_events(rxq->hv, rxq->queue_id, 0);
	if (offset >= rxq->rx_ring->capacity)
	if (offset < rte_ring_count(rxq->rx_ring))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
	struct hn_tx_queue *txq = arg;
	return hn_process_events(txq->hv, txq->queue_id, free_cnt);
 * Process pending events on the channel.
 * Called from both Rx queue poll and Tx cleanup
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
	struct hn_rx_queue *rxq;
	uint32_t bytes_read = 0;
	uint32_t tx_done = 0;
	rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
	 * The channel is shared between the Rx and Tx queues, so a lock is
	 * needed; DPDK does not force the same CPU to be used for Rx and Tx.
	if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
		const struct vmbus_chanpkt_hdr *pkt;
		uint32_t len = rxq->event_sz;
		ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
			break;	/* ring is empty */
		if (unlikely(ret == -ENOBUFS)) {
			/* event buffer not large enough to read ring */
				    "event buffer expansion (need %u)", len);
			rxq->event_sz = len + len / 4;
			rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
						     RTE_CACHE_LINE_SIZE);
				/* out of memory, no more events now */
		if (unlikely(ret <= 0)) {
			/* This indicates a failure to communicate (or worse) */
			rte_exit(EXIT_FAILURE,
				 "vmbus ring buffer error: %d", ret);
		pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
		data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
		switch (pkt->type) {
		case VMBUS_CHANPKT_TYPE_COMP:
			hn_nvs_handle_comp(dev, queue_id, pkt, data);
		case VMBUS_CHANPKT_TYPE_RXBUF:
			hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
		case VMBUS_CHANPKT_TYPE_INBAND:
			hn_nvs_handle_notify(dev, pkt, data);
			PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
		if (tx_limit && tx_done >= tx_limit)
		rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
	rte_spinlock_unlock(&rxq->ring_lock);
static void hn_append_to_chim(struct hn_tx_queue *txq,
			      struct rndis_packet_msg *pkt,
			      const struct rte_mbuf *m)
	struct hn_txdesc *txd = txq->agg_txd;
	uint8_t *buf = (uint8_t *)pkt;
	unsigned int data_offs;
	data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
	txd->chim_size += pkt->len;
	txd->data_size += m->pkt_len;
	hn_update_packet_stats(&txq->stats, m);
	for (; m; m = m->next) {
		uint16_t len = rte_pktmbuf_data_len(m);
		rte_memcpy(buf + data_offs,
			   rte_pktmbuf_mtod(m, const char *), len);
 * Send pending aggregated data in chimney buffer (if any).
 * Returns error if send was unsuccessful because channel ring buffer
static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
	struct hn_txdesc *txd = txq->agg_txd;
	struct hn_nvs_rndis rndis;
	rndis = (struct hn_nvs_rndis) {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_idx = txd->chim_index,
		.chim_sz = txd->chim_size,
	PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
	ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
			  &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
	if (likely(ret == 0))
		hn_reset_txagg(txq);
	else if (ret == -EAGAIN) {
		PMD_TX_LOG(DEBUG, "port %u:%u channel full",
			   txq->port_id, txq->queue_id);
		++txq->stats.channel_full;
		++txq->stats.errors;
		PMD_DRV_LOG(NOTICE, "port %u:%u send failed: %d",
			    txq->port_id, txq->queue_id, ret);
 * Try to find a place in a send chimney buffer to put
 * the small packet. If space is available, this routine
 * returns a pointer to where the data should be placed.
 * If there is no space, the caller should try direct transmit.
hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq,
	     struct hn_txdesc *txd, uint32_t pktsize)
	struct hn_txdesc *agg_txd = txq->agg_txd;
	struct rndis_packet_msg *pkt;
		unsigned int padding, olen;
		 * Update the previous RNDIS packet's total length; it can
		 * grow because of the mandatory alignment padding for this
		 * RNDIS packet. Grow the aggregating txdesc's chimney
		 * sending buffer size to match.
		 * Zero-out the padding, as required by the RNDIS spec.
		pkt = txq->agg_prevpkt;
		padding = RTE_ALIGN(olen, txq->agg_align) - olen;
			agg_txd->chim_size += padding;
			pkt->len += padding;
			memset((uint8_t *)pkt + olen, 0, padding);
		chim = (uint8_t *)pkt + pkt->len;
		txq->agg_prevpkt = chim;
		txq->agg_szleft -= pktsize;
		if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
			 * Probably can't aggregate more packets,
			 * flush this aggregating txdesc proactively.
			txq->agg_pktleft = 0;
		hn_txd_put(txq, txd);
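		/*
		 * The new packet is copied into and accounted to the
		 * already-aggregating txd (txq->agg_txd), so the txd the
		 * caller just allocated is not needed and goes back to the
		 * descriptor pool here.
		 */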
	txd->chim_index = hn_chim_alloc(hv);
	if (txd->chim_index == NVS_CHIM_IDX_INVALID)
	chim = (uint8_t *)hv->chim_res->addr
		+ txd->chim_index * hv->chim_szmax;
	txq->agg_pktleft = txq->agg_pktmax - 1;
	txq->agg_szleft = txq->agg_szmax - pktsize;
	txq->agg_prevpkt = chim;
static inline void *
hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
			uint32_t pi_dlen, uint32_t pi_type)
	const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
	struct rndis_pktinfo *pi;
	 * Per-packet-info does not move; it only grows.
	 * pktinfooffset in this phase counts from the beginning
	 * of rndis_packet_msg.
	pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
	pkt->pktinfolen += pi_size;
	pi->offset = RNDIS_PKTINFO_OFFSET;
/* Put RNDIS header and packet info on packet */
static void hn_encap(struct rndis_packet_msg *pkt,
		     const struct rte_mbuf *m)
	unsigned int hlen = m->l2_len + m->l3_len;
	pkt->type = RNDIS_PACKET_MSG;
	pkt->len = m->pkt_len;
	pkt->dataoffset = 0;
	pkt->datalen = m->pkt_len;
	pkt->oobdataoffset = 0;
	pkt->oobdatalen = 0;
	pkt->oobdataelements = 0;
	pkt->pktinfooffset = sizeof(*pkt);
	pkt->pktinfolen = 0;
	 * Set the hash value for this packet to the queue_id, so that the
	 * TX-done event for this packet arrives on the right channel.
	pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
					  NDIS_PKTINFO_TYPE_HASHVAL);
	*pi_data = queue_id;
	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_VLAN);
		*pi_data = m->vlan_tci;
	if (m->ol_flags & PKT_TX_TCP_SEG) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_LSO);
		if (m->ol_flags & PKT_TX_IPV6) {
			*pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
	} else if (m->ol_flags &
		   (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
		pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
						  NDIS_PKTINFO_TYPE_CSUM);
		if (m->ol_flags & PKT_TX_IPV6)
			*pi_data |= NDIS_TXCSUM_INFO_IPV6;
		if (m->ol_flags & PKT_TX_IPV4) {
			*pi_data |= NDIS_TXCSUM_INFO_IPV4;
			if (m->ol_flags & PKT_TX_IP_CKSUM)
				*pi_data |= NDIS_TXCSUM_INFO_IPCS;
		if (m->ol_flags & PKT_TX_TCP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
		else if (m->ol_flags & PKT_TX_UDP_CKSUM)
			*pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
	pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
	/* Fixup RNDIS packet message total length */
	pkt->len += pkt_hlen;
	/* Convert RNDIS packet message offsets */
	pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
	pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
/* How many scatter gather list elements are needed */
static unsigned int hn_get_slots(const struct rte_mbuf *m)
	unsigned int slots = 1;		/* for RNDIS header */
		unsigned int size = rte_pktmbuf_data_len(m);
		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
		slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
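		/*
		 * A segment that starts at offset 'offs' within a page and
		 * spans 'size' bytes touches ceil((offs + size) / PAGE_SIZE)
		 * pages, and each page needs its own vmbus_gpa element in
		 * the scatter-gather list built by hn_fill_sg().
		 */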
/* Build scatter gather list from chained mbuf */
static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
			       const struct rte_mbuf *m)
	unsigned int segs = 0;
		rte_iova_t addr = rte_mbuf_data_iova(m);
		unsigned int page = addr / PAGE_SIZE;
		unsigned int offset = addr & PAGE_MASK;
		unsigned int len = rte_pktmbuf_data_len(m);
			unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
			sg[segs].page = page;
			sg[segs].ofs = offset;
			sg[segs].len = bytes;
/* Transmit directly from mbuf */
static int hn_xmit_sg(struct hn_tx_queue *txq,
		      const struct hn_txdesc *txd, const struct rte_mbuf *m,
	struct vmbus_gpa sg[hn_get_slots(m)];
	struct hn_nvs_rndis nvs_rndis = {
		.type = NVS_TYPE_RNDIS,
		.rndis_mtype = NVS_RNDIS_MTYPE_DATA,
		.chim_sz = txd->chim_size,
	/* attach aggregation data if present */
	if (txd->chim_size > 0)
		nvs_rndis.chim_idx = txd->chim_index;
		nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
	hn_rndis_dump(txd->rndis_pkt);
	/* pass IOVA of rndis header in first segment */
	addr = rte_malloc_virt2iova(txq->tx_rndis);
	if (unlikely(addr == RTE_BAD_IOVA)) {
		PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
	addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
	sg[0].page = addr / PAGE_SIZE;
	sg[0].ofs = addr & PAGE_MASK;
	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
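	/*
	 * sg[0] carries only the RNDIS header built in this descriptor's
	 * tx_rndis slot; the mbuf payload follows in sg[1..] and is sent by
	 * reference rather than copied.
	 */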
	hn_update_packet_stats(&txq->stats, m);
	segs += hn_fill_sg(sg + 1, m);
	PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
		   txq->port_id, txq->queue_id, txd->chim_index,
		   segs, nvs_rndis.chim_sz);
	return hn_nvs_send_sglist(txq->chan, sg, segs,
				  &nvs_rndis, sizeof(nvs_rndis),
				  (uintptr_t)txd, need_sig);
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct hn_tx_queue *txq = ptxq;
	uint16_t queue_id = txq->queue_id;
	struct hn_data *hv = txq->hv;
	struct rte_eth_dev *vf_dev;
	bool need_sig = false;
	uint16_t nb_tx, tx_thresh;
	if (unlikely(hv->closed))
	 * Always check for events on the primary channel
	 * because that is where hotplug notifications occur.
	tx_thresh = RTE_MAX(txq->free_thresh, nb_pkts);
	if (txq->queue_id == 0 ||
	    rte_mempool_avail_count(txq->txdesc_pool) < tx_thresh)
		hn_process_events(hv, txq->queue_id, 0);
	/* Transmit over VF if present and up */
	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->data->dev_started) {
		void *sub_q = vf_dev->data->tx_queues[queue_id];
		nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
		rte_rwlock_read_unlock(&hv->vf_lock);
	rte_rwlock_read_unlock(&hv->vf_lock);
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];
		uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
		struct rndis_packet_msg *pkt;
		struct hn_txdesc *txd;
		txd = hn_txd_get(txq);
		/* For small packets aggregate them in chimney buffer */
		if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
			/* If this packet will not fit, then flush */
			if (txq->agg_pktleft == 0 ||
			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
				if (hn_flush_txagg(txq, &need_sig))
			pkt = hn_try_txagg(hv, txq, txd, pkt_size);
			hn_encap(pkt, queue_id, m);
			hn_append_to_chim(txq, pkt, m);
			rte_pktmbuf_free(m);
			/* if buffer is full, flush */
			if (txq->agg_pktleft == 0 &&
			    hn_flush_txagg(txq, &need_sig))
			/* Send any outstanding packets in buffer */
			if (txq->agg_txd && hn_flush_txagg(txq, &need_sig))
			pkt = txd->rndis_pkt;
			txd->data_size = m->pkt_len;
			hn_encap(pkt, queue_id, m);
			ret = hn_xmit_sg(txq, txd, m, &need_sig);
			if (unlikely(ret != 0)) {
				if (ret == -EAGAIN) {
					PMD_TX_LOG(DEBUG, "sg channel full");
					++txq->stats.channel_full;
					PMD_DRV_LOG(NOTICE, "sg send failed: %d", ret);
					++txq->stats.errors;
				hn_txd_put(txq, txd);
	/* If a partial buffer is left, then try to send it;
	 * if that fails, reuse it on the next send.
	hn_flush_txagg(txq, &need_sig);
		rte_vmbus_chan_signal_tx(txq->chan);
hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
	   struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	if (unlikely(nb_pkts == 0))
	n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
	/* relabel the received mbufs */
	for (i = 0; i < n; i++)
		rx_pkts[i]->port = rxq->port_id;
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct hn_rx_queue *rxq = prxq;
	struct hn_data *hv = rxq->hv;
	struct rte_eth_dev *vf_dev;
	if (unlikely(hv->closed))
	/* Check for new completions (and hotplug) */
	if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
		hn_process_events(hv, rxq->queue_id, 0);
	/* Always check the vmbus path for multicast and new flows */
	nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
					   (void **)rx_pkts, nb_pkts, NULL);
	/* If VF is available, check that as well */
	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->data->dev_started)
		nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
				     rx_pkts + nb_rcv, nb_pkts - nb_rcv);
	rte_rwlock_read_unlock(&hv->vf_lock);
hn_dev_free_queues(struct rte_eth_dev *dev)
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];
		hn_rx_queue_free(rxq, false);
		dev->data->rx_queues[i] = NULL;
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		hn_dev_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	dev->data->nb_tx_queues = 0;