/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_spinlock.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"
#define HN_NVS_SEND_MSG_SIZE \
    (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))

#define HN_TXD_CACHE_SIZE   32 /* per-cpu tx descriptor pool cache */
#define HN_TXCOPY_THRESHOLD 512

#define HN_RXCOPY_THRESHOLD 256
#define HN_RXQ_EVENT_DEFAULT 1024
#define HN_RXINFO_VLAN      0x0001
#define HN_RXINFO_CSUM      0x0002
#define HN_RXINFO_HASHINF   0x0004
#define HN_RXINFO_HASHVAL   0x0008
#define HN_RXINFO_ALL       \
    (HN_RXINFO_VLAN |       \
     HN_RXINFO_CSUM |       \
     HN_RXINFO_HASHINF |    \
     HN_RXINFO_HASHVAL)

#define HN_NDIS_VLAN_INFO_INVALID   0xffffffff
#define HN_NDIS_RXCSUM_INFO_INVALID 0
#define HN_NDIS_HASH_INFO_INVALID   0
/*
 * Per-transmit bookkeeping.
 * A slot in the transmit ring (chim_index) is reserved for each transmit.
 *
 * There are two types of transmit:
 *   - buffered transmit where the chimney buffer is used and the RNDIS
 *     header is in the buffer. mbuf == NULL for this case.
 *   - direct transmit where the RNDIS header is in rndis_pkt and the
 *     mbuf is freed after transmit.
 *
 * Descriptors come from a per-port pool which is used
 * to limit the number of outstanding requests per device.
 */
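/*
 * The "chimney" is a host-visible send buffer carved into hv->chim_cnt
 * slots of hv->chim_szmax bytes each; chim_index selects the slot
 * (see hn_try_txagg()).
 */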
struct hn_txdesc {
    struct rte_mbuf *m;

    uint16_t queue_id;
    uint16_t chim_index;
    uint32_t chim_size;
    uint32_t data_size;
    uint32_t packets;

    struct rndis_packet_msg *rndis_pkt;
};
#define HN_RNDIS_PKT_LEN                            \
    (sizeof(struct rndis_packet_msg) +              \
     RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) +     \
     RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +      \
     RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +      \
     RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
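/*
 * HN_RNDIS_PKT_LEN is the worst-case RNDIS header: the base message plus
 * the four per-packet-info fields that hn_encap() may append (hash value,
 * VLAN, LSO, and transmit checksum).
 */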
/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
    RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
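/*
 * For example, if the aggregation alignment is 8 bytes, this rounds
 * ETHER_MIN_LEN + HN_RNDIS_PKT_LEN up to the next multiple of 8;
 * hn_try_txagg() uses it to decide when a chimney slot is too full to
 * take another packet.
 */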
#define DEFAULT_TX_FREE_THRESH 32U
static void
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
{
    uint32_t s = m->pkt_len;
    const struct ether_addr *ea;

    if (s == 64) {
        stats->size_bins[1]++;
    } else if (s > 64 && s < 1024) {
        uint32_t bin;

        /* count zeros, and offset into correct bin */
        bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
        stats->size_bins[bin]++;
    } else {
        if (s < 64)
            stats->size_bins[0]++;
        else if (s < 1519)
            stats->size_bins[6]++;
        else
            stats->size_bins[7]++;
    }
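    /*
     * Example: s = 300 gives 32 - __builtin_clz(300) = 9, so bin = 4,
     * i.e. the 256-511 byte bucket. The resulting histogram is:
     * bin 0: <64, 1: 64, 2: 65-127, 3: 128-255, 4: 256-511,
     * 5: 512-1023, 6: 1024-1518, 7: >=1519.
     */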
    ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
    if (is_multicast_ether_addr(ea)) {
        if (is_broadcast_ether_addr(ea))
            stats->broadcast++;
        else
            stats->multicast++;
    }
}
static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
{
    return pkt->pktinfooffset + pkt->pktinfolen;
}
static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
{
    return ofs - offsetof(struct rndis_packet_msg, dataoffset);
}
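/*
 * On the wire, RNDIS offsets are relative to the dataoffset field (i.e.
 * they skip the 8-byte type/len preamble), while hn_encap() builds them
 * relative to the start of struct rndis_packet_msg;
 * hn_rndis_pktmsg_offset() converts the latter into the former.
 */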
static void hn_txd_init(struct rte_mempool *mp __rte_unused,
                        void *opaque, void *obj, unsigned int idx)
{
    struct hn_txdesc *txd = obj;
    struct rte_eth_dev *dev = opaque;
    struct rndis_packet_msg *pkt;

    memset(txd, 0, sizeof(*txd));
    txd->chim_index = idx;

    pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
                            rte_align32pow2(HN_RNDIS_PKT_LEN),
                            dev->device->numa_node);
    if (!pkt)
        rte_exit(EXIT_FAILURE, "cannot allocate RNDIS header");

    txd->rndis_pkt = pkt;
}
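/*
 * Note: the mempool object index doubles as the chimney slot number, so
 * each descriptor owns exactly one slot for the lifetime of the pool.
 */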
/*
 * Unlike Linux and FreeBSD, this driver uses a mempool
 * to limit outstanding transmits and reserve buffers.
 */
int
hn_tx_pool_init(struct rte_eth_dev *dev)
{
    struct hn_data *hv = dev->data->dev_private;
    char name[RTE_MEMPOOL_NAMESIZE];
    struct rte_mempool *mp;

    snprintf(name, sizeof(name),
             "hn_txd_%u", dev->data->port_id);

    PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
                 name, hv->chim_cnt, sizeof(struct hn_txdesc),
                 dev->device->numa_node);

    mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
                            HN_TXD_CACHE_SIZE, 0,
                            NULL, NULL,
                            hn_txd_init, dev,
                            dev->device->numa_node, 0);
    if (!mp) {
        PMD_DRV_LOG(ERR,
                    "mempool %s create failed: %d", name, rte_errno);
        return -rte_errno;
    }

    hv->tx_pool = mp;
    return 0;
}
static void hn_reset_txagg(struct hn_tx_queue *txq)
{
    txq->agg_szleft = txq->agg_szmax;
    txq->agg_pktleft = txq->agg_pktmax;
    txq->agg_txd = NULL;
    txq->agg_prevpkt = NULL;
}
int
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
                      uint16_t queue_idx, uint16_t nb_desc __rte_unused,
                      unsigned int socket_id,
                      const struct rte_eth_txconf *tx_conf)
{
    struct hn_data *hv = dev->data->dev_private;
    struct hn_tx_queue *txq;
    uint32_t tx_free_thresh;

    PMD_INIT_FUNC_TRACE();

    txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
                             socket_id);
    if (!txq)
        return -ENOMEM;

    txq->hv = hv;
    txq->chan = hv->channels[queue_idx];
    txq->port_id = dev->data->port_id;
    txq->queue_id = queue_idx;

    tx_free_thresh = tx_conf->tx_free_thresh;
    if (tx_free_thresh == 0)
        tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
                                 DEFAULT_TX_FREE_THRESH);

    if (tx_free_thresh >= hv->chim_cnt - 3)
        tx_free_thresh = hv->chim_cnt - 3;

    txq->free_thresh = tx_free_thresh;
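    /*
     * When the number of free transmit descriptors drops to this
     * threshold, the transmit path polls the channel for completions
     * before queueing more packets (see hn_xmit_pkts()).
     */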
    txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
    txq->agg_pktmax = hv->rndis_agg_pkts;
    txq->agg_align = hv->rndis_agg_align;

    hn_reset_txagg(txq);

    dev->data->tx_queues[queue_idx] = txq;

    return 0;
}
void
hn_dev_tx_queue_release(void *arg)
{
    struct hn_tx_queue *txq = arg;
    struct hn_txdesc *txd;

    PMD_INIT_FUNC_TRACE();

    if (!txq)
        return;

    /* If any pending data is still present just drop it */
    txd = txq->agg_txd;
    if (txd)
        rte_mempool_put(txq->hv->tx_pool, txd);

    rte_free(txq);
}
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
                      unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
{
    struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
    struct hn_tx_queue *txq;

    /* Control packets are sent with xactid == 0 */
    if (!txd)
        return;

    txq = dev->data->tx_queues[queue_id];
    if (likely(ack->status == NVS_STATUS_OK)) {
        PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
                   txq->port_id, txq->queue_id, txd->chim_index,
                   txd->packets, txd->data_size);
        txq->stats.bytes += txd->data_size;
        txq->stats.packets += txd->packets;
    } else {
        PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
                   txq->port_id, txq->queue_id, txd->chim_index, ack->status);
        ++txq->stats.errors;
    }

    rte_pktmbuf_free(txd->m);

    rte_mempool_put(txq->hv->tx_pool, txd);
}
/* Handle transmit completion events */
static void
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
                   const struct vmbus_chanpkt_hdr *pkt,
                   const void *data)
{
    const struct hn_nvs_hdr *hdr = data;

    switch (hdr->type) {
    case NVS_TYPE_RNDIS_ACK:
        hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
        break;

    default:
        PMD_TX_LOG(NOTICE,
                   "unexpected send completion type %u",
                   hdr->type);
    }
}
/* Parse per-packet info (metadata) */
static int
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
                struct hn_rxinfo *info)
{
    const struct rndis_pktinfo *pi = info_data;
    uint32_t mask = 0;

    while (info_dlen != 0) {
        const void *data;
        uint32_t dlen;

        if (unlikely(info_dlen < sizeof(*pi)))
            return -EINVAL;

        if (unlikely(info_dlen < pi->size))
            return -EINVAL;
        info_dlen -= pi->size;

        if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
            return -EINVAL;
        if (unlikely(pi->size < pi->offset))
            return -EINVAL;

        dlen = pi->size - pi->offset;
        data = pi->data;

        switch (pi->type) {
        case NDIS_PKTINFO_TYPE_VLAN:
            if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
                return -EINVAL;
            info->vlan_info = *((const uint32_t *)data);
            mask |= HN_RXINFO_VLAN;
            break;

        case NDIS_PKTINFO_TYPE_CSUM:
            if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
                return -EINVAL;
            info->csum_info = *((const uint32_t *)data);
            mask |= HN_RXINFO_CSUM;
            break;

        case NDIS_PKTINFO_TYPE_HASHVAL:
            if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
                return -EINVAL;
            info->hash_value = *((const uint32_t *)data);
            mask |= HN_RXINFO_HASHVAL;
            break;

        case NDIS_PKTINFO_TYPE_HASHINF:
            if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
                return -EINVAL;
            info->hash_info = *((const uint32_t *)data);
            mask |= HN_RXINFO_HASHINF;
            break;

        default:
            goto next;
        }

        if (mask == HN_RXINFO_ALL)
            break; /* All found; done */
next:
        pi = (const struct rndis_pktinfo *)
            ((const uint8_t *)pi + pi->size);
    }

    /*
     * Final fixup.
     * - If there is no hash value, invalidate the hash info.
     */
    if (!(mask & HN_RXINFO_HASHVAL))
        info->hash_info = HN_NDIS_HASH_INFO_INVALID;
    return 0;
}
/*
 * Ack the consumed RXBUF associated with this channel packet,
 * so that this RXBUF can be recycled by the hypervisor.
 */
static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
{
    struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
    struct hn_data *hv = rxb->hv;

    if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
        hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
        --hv->rxbuf_outstanding;
    }
}

static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
{
    hn_rx_buf_release(opaque);
}
static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
                                            const struct vmbus_chanpkt_rxbuf *pkt)
{
    struct hn_rx_bufinfo *rxb;

    rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
    rxb->chan = rxq->chan;
    rxb->xactid = pkt->hdr.xactid;
    rxb->hv = rxq->hv;

    rxb->shinfo.free_cb = hn_rx_buf_free_cb;
    rxb->shinfo.fcb_opaque = rxb;
    rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);

    return rxb;
}
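/*
 * Reference counting scheme: the count starts at 1 here; each mbuf that
 * attaches to the external receive area adds one, and hn_rx_buf_release()
 * drops one. When it reaches zero the section is acked back to the host.
 */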
static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
                     uint8_t *data, unsigned int headroom, unsigned int dlen,
                     const struct hn_rxinfo *info)
{
    struct hn_data *hv = rxq->hv;
    struct rte_mbuf *m;

    m = rte_pktmbuf_alloc(rxq->mb_pool);
    if (unlikely(!m)) {
        struct rte_eth_dev *dev =
            &rte_eth_devices[rxq->port_id];

        dev->data->rx_mbuf_alloc_failed++;
        return;
    }

    /*
     * For large packets, avoid copy if possible but need to keep
     * some space available in receive area for later packets.
     */
    if (dlen >= HN_RXCOPY_THRESHOLD &&
        hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
        struct rte_mbuf_ext_shared_info *shinfo;
        const void *rxbuf;
        rte_iova_t iova;

        /*
         * Build an external mbuf that points to the receive area.
         * Use refcount to handle multiple packets in the same
         * receive buffer section.
         */
        rxbuf = hv->rxbuf_res->addr;
        iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
        shinfo = &rxb->shinfo;

        if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
            ++hv->rxbuf_outstanding;

        rte_pktmbuf_attach_extbuf(m, data, iova,
                                  dlen + headroom, shinfo);
        m->data_off = headroom;
    } else {
        /* Mbufs in this pool must be large enough to hold small packets */
        if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
            rte_pktmbuf_free_seg(m);
            ++rxq->stats.errors;
            return;
        }
        rte_memcpy(rte_pktmbuf_mtod(m, void *),
                   data + headroom, dlen);
    }
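    /*
     * Copy-vs-zero-copy tradeoff: packets under HN_RXCOPY_THRESHOLD are
     * cheaper to copy than to track as external buffers, and copying
     * also lets the host receive section be recycled sooner.
     */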
    m->port = rxq->port_id;
    m->pkt_len = dlen;
    m->data_len = dlen;

    if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
        m->vlan_tci = info->vlan_info;
        m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
    }

    if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
        if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
            m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

        if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
                               | NDIS_RXCSUM_INFO_TCPCS_OK))
            m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
    }

    if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
        m->ol_flags |= PKT_RX_RSS_HASH;
        m->hash.rss = info->hash_value;
    }

    PMD_RX_LOG(DEBUG, "port %u:%u RX id %" PRIu64 " size %u ol_flags %#" PRIx64,
               rxq->port_id, rxq->queue_id, rxb->xactid,
               m->pkt_len, m->ol_flags);

    ++rxq->stats.packets;
    rxq->stats.bytes += m->pkt_len;
    hn_update_packet_stats(&rxq->stats, m);

    if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
        /* Staging ring is full: drop the packet */
        ++rxq->stats.errors;
        rte_pktmbuf_free(m);
    }
}
static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
                             struct hn_rx_bufinfo *rxb,
                             void *data, uint32_t dlen)
{
    unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
    const struct rndis_packet_msg *pkt = data;
    struct hn_rxinfo info = {
        .vlan_info = HN_NDIS_VLAN_INFO_INVALID,
        .csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
        .hash_info = HN_NDIS_HASH_INFO_INVALID,
    };
    int err;

    hn_rndis_dump(pkt);

    if (unlikely(dlen < sizeof(*pkt)))
        goto error;

    if (unlikely(dlen < pkt->len))
        goto error; /* truncated RNDIS from host */

    if (unlikely(pkt->len < pkt->datalen
                 + pkt->oobdatalen + pkt->pktinfolen))
        goto error;

    if (unlikely(pkt->datalen == 0))
        goto error;

    if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
        goto error;

    if (likely(pkt->pktinfooffset > 0) &&
        unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
                 (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
        goto error;

    data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
    data_len = pkt->datalen;
    pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
    pktinfo_len = pkt->pktinfolen;

    if (likely(pktinfo_len > 0)) {
        err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
                              pktinfo_len, &info);
        if (err)
            goto error;
    }

    if (unlikely(data_off + data_len > pkt->len))
        goto error;

    if (unlikely(data_len < ETHER_HDR_LEN))
        goto error;

    hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
    return;
error:
    ++rxq->stats.errors;
}
static void
hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
                 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
{
    const struct rndis_msghdr *hdr = buf;

    switch (hdr->type) {
    case RNDIS_PACKET_MSG:
        if (dev->data->dev_started)
            hn_rndis_rx_data(rxq, rxb, buf, len);
        break;

    case RNDIS_INDICATE_STATUS_MSG:
        hn_rndis_link_status(rxq->hv, buf);
        break;

    case RNDIS_INITIALIZE_CMPLT:
    case RNDIS_QUERY_CMPLT:
    case RNDIS_SET_CMPLT:
        hn_rndis_receive_response(rxq->hv, buf, len);
        break;

    default:
        PMD_DRV_LOG(NOTICE,
                    "unexpected RNDIS message (type %#x len %u)",
                    hdr->type, len);
        break;
    }
}
static void
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
                    struct hn_data *hv,
                    struct hn_rx_queue *rxq,
                    const struct vmbus_chanpkt_hdr *hdr,
                    const void *buf)
{
    const struct vmbus_chanpkt_rxbuf *pkt;
    const struct hn_nvs_hdr *nvs_hdr = buf;
    uint32_t rxbuf_sz = hv->rxbuf_res->len;
    char *rxbuf = hv->rxbuf_res->addr;
    unsigned int i, hlen, count;
    struct hn_rx_bufinfo *rxb;

    /* At a minimum we need the type header */
    if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
        PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
        return;
    }

    /* Make sure that this is a RNDIS message. */
    if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
        PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
                   nvs_hdr->type);
        return;
    }

    hlen = vmbus_chanpkt_getlen(hdr->hlen);
    if (unlikely(hlen < sizeof(*pkt))) {
        PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
        return;
    }

    pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
    if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
        PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
                   pkt->rxbuf_id);
        return;
    }

    count = pkt->rxbuf_cnt;
    if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
                                 rxbuf[count]))) {
        PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
        return;
    }

    if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
        PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
                   pkt->hdr.xactid);
        return;
    }

    /* Setup receive buffer info to allow for callback */
    rxb = hn_rx_buf_init(rxq, pkt);

    /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
    for (i = 0; i < count; ++i) {
        unsigned int ofs, len;

        ofs = pkt->rxbuf[i].ofs;
        len = pkt->rxbuf[i].len;

        if (unlikely(ofs + len > rxbuf_sz)) {
            PMD_RX_LOG(ERR,
                       "%uth RNDIS msg overflow ofs %u, len %u",
                       i, ofs, len);
            continue;
        }

        if (unlikely(len == 0)) {
            PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
            continue;
        }

        hn_rndis_receive(dev, rxq, rxb,
                         rxbuf + ofs, len);
    }

    /* Send ACK now if external mbuf not used */
    hn_rx_buf_release(rxb);
}
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
                                      uint16_t queue_id,
                                      unsigned int socket_id)
{
    struct hn_rx_queue *rxq;

    rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
                             RTE_CACHE_LINE_SIZE, socket_id);
    if (!rxq)
        return NULL;

    rxq->hv = hv;
    rxq->chan = hv->channels[queue_id];
    rte_spinlock_init(&rxq->ring_lock);
    rxq->port_id = hv->port_id;
    rxq->queue_id = queue_id;

    rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
    rxq->event_buf = rte_malloc_socket("RX_EVENTS",
                                       rxq->event_sz,
                                       RTE_CACHE_LINE_SIZE, socket_id);
    if (!rxq->event_buf) {
        rte_free(rxq);
        return NULL;
    }

    return rxq;
}
int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
                      uint16_t queue_idx, uint16_t nb_desc,
                      unsigned int socket_id,
                      const struct rte_eth_rxconf *rx_conf __rte_unused,
                      struct rte_mempool *mp)
{
    struct hn_data *hv = dev->data->dev_private;
    uint32_t qmax = hv->rxbuf_section_cnt;
    char ring_name[RTE_RING_NAMESIZE];
    struct hn_rx_queue *rxq;
    unsigned int count;
    size_t size;
    int err;

    PMD_INIT_FUNC_TRACE();

    if (nb_desc == 0 || nb_desc > qmax)
        return -EINVAL;

    if (queue_idx == 0) {
        rxq = hv->primary;
    } else {
        rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
        if (!rxq)
            return -ENOMEM;
    }

    rxq->mb_pool = mp;

    count = rte_align32pow2(nb_desc);
    size = sizeof(struct rte_ring) + count * sizeof(void *);
    rxq->rx_ring = rte_malloc_socket("RX_RING", size,
                                     RTE_CACHE_LINE_SIZE,
                                     socket_id);
    if (!rxq->rx_ring) {
        err = -ENOMEM;
        goto fail;
    }

    /*
     * Staging ring from the receive event logic to hn_recv_pkts().
     * hn_recv_pkts() assumes the caller handles any multi-threading;
     * the event logic has its own locking.
     */
    snprintf(ring_name, sizeof(ring_name),
             "hn_rx_%u_%u", dev->data->port_id, queue_idx);
    err = rte_ring_init(rxq->rx_ring, ring_name,
                        count, 0);
    if (err)
        goto fail;

    dev->data->rx_queues[queue_idx] = rxq;
    return 0;

fail:
    rte_free(rxq->rx_ring);
    rte_free(rxq->event_buf);
    rte_free(rxq);
    return err;
}
void
hn_dev_rx_queue_release(void *arg)
{
    struct hn_rx_queue *rxq = arg;

    PMD_INIT_FUNC_TRACE();

    if (!rxq)
        return;

    rte_free(rxq->rx_ring);
    rxq->rx_ring = NULL;
    rxq->mb_pool = NULL;

    if (rxq != rxq->hv->primary) {
        rte_free(rxq->event_buf);
        rte_free(rxq);
    }
}
static void
hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
                     const void *data)
{
    const struct hn_nvs_hdr *hdr = data;

    if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
        PMD_DRV_LOG(ERR, "invalid nvs notify");
        return;
    }

    PMD_DRV_LOG(INFO,
                "got notify, nvs type %u", hdr->type);
}
/*
 * Process pending events on the channel.
 * Called from both Rx queue poll and Tx cleanup.
 */
void hn_process_events(struct hn_data *hv, uint16_t queue_id)
{
    struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
    struct hn_rx_queue *rxq;
    int ret = 0;

    rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];

    /* If no pending data then nothing to do */
    if (rte_vmbus_chan_rx_empty(rxq->chan))
        return;

    /*
     * The channel is shared between the Rx and Tx queues, so a lock is
     * needed: DPDK does not force the same CPU to be used for Rx and Tx.
     */
    if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
        return;

    for (;;) {
        const struct vmbus_chanpkt_hdr *pkt;
        uint32_t len = rxq->event_sz;
        const void *data;

        ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
        if (ret == -EAGAIN)
            break; /* ring is empty */

        if (ret == -ENOBUFS) {
            /* expanded buffer needed */
            len = rte_align32pow2(len);
            PMD_DRV_LOG(DEBUG, "expand event buf to %u", len);

            rxq->event_buf = rte_realloc(rxq->event_buf,
                                         len, RTE_CACHE_LINE_SIZE);
            if (rxq->event_buf) {
                rxq->event_sz = len;
                continue;
            }

            rte_exit(EXIT_FAILURE, "cannot expand event buf!\n");
        }

        if (ret != 0) {
            PMD_DRV_LOG(ERR, "vmbus ring buffer error: %d", ret);
            break;
        }

        pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
        data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);

        switch (pkt->type) {
        case VMBUS_CHANPKT_TYPE_COMP:
            hn_nvs_handle_comp(dev, queue_id, pkt, data);
            break;

        case VMBUS_CHANPKT_TYPE_RXBUF:
            hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
            break;

        case VMBUS_CHANPKT_TYPE_INBAND:
            hn_nvs_handle_notify(pkt, data);
            break;

        default:
            PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
            break;
        }
    }

    rte_spinlock_unlock(&rxq->ring_lock);

    if (unlikely(ret != -EAGAIN))
        PMD_DRV_LOG(ERR, "channel receive failed: %d", ret);
}
static void hn_append_to_chim(struct hn_tx_queue *txq,
                              struct rndis_packet_msg *pkt,
                              const struct rte_mbuf *m)
{
    struct hn_txdesc *txd = txq->agg_txd;
    uint8_t *buf = (uint8_t *)pkt;
    unsigned int data_offs;

    hn_rndis_dump(pkt);

    data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
    txd->chim_size += pkt->len;
    txd->data_size += m->pkt_len;
    ++txd->packets;
    hn_update_packet_stats(&txq->stats, m);

    for (; m; m = m->next) {
        uint16_t len = rte_pktmbuf_data_len(m);

        rte_memcpy(buf + data_offs,
                   rte_pktmbuf_mtod(m, const char *), len);
        data_offs += len;
    }
}
/*
 * Send pending aggregated data in the chimney buffer (if any).
 * Returns an error if the send was unsuccessful because the channel
 * ring buffer was full.
 */
static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
{
    struct hn_txdesc *txd = txq->agg_txd;
    struct hn_nvs_rndis rndis;
    int ret;

    if (!txd)
        return 0;

    rndis = (struct hn_nvs_rndis) {
        .type = NVS_TYPE_RNDIS,
        .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
        .chim_idx = txd->chim_index,
        .chim_sz = txd->chim_size,
    };

    PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
               txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);

    ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
                      &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);

    if (likely(ret == 0))
        hn_reset_txagg(txq);
    else
        PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
                   txq->port_id, txq->queue_id, ret);

    return ret;
}
static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
                                    struct hn_tx_queue *txq)
{
    struct hn_txdesc *txd;

    if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
        ++txq->stats.nomemory;
        PMD_TX_LOG(DEBUG, "tx pool exhausted!");
        return NULL;
    }

    txd->m = NULL;
    txd->queue_id = txq->queue_id;
    txd->packets = 0;
    txd->data_size = 0;
    txd->chim_size = 0;

    return txd;
}
static void *
hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
{
    struct hn_txdesc *agg_txd = txq->agg_txd;
    struct rndis_packet_msg *pkt;
    void *chim;

    if (agg_txd) {
        unsigned int padding, olen;

        /*
         * Update the previous RNDIS packet's total length: it can grow
         * because of the mandatory alignment padding for this RNDIS
         * packet. Update the aggregating txdesc's chimney sending
         * buffer size accordingly.
         *
         * Zero out the padding, as required by the RNDIS spec.
         */
        pkt = txq->agg_prevpkt;
        olen = pkt->len;
        padding = RTE_ALIGN(olen, txq->agg_align) - olen;
        if (padding > 0) {
            agg_txd->chim_size += padding;
            pkt->len += padding;
            memset((uint8_t *)pkt + olen, 0, padding);
        }
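        /*
         * Example: with olen = 122 and agg_align = 8,
         * padding = RTE_ALIGN(122, 8) - 122 = 6; the previous packet
         * grows to 128 bytes and the 6 pad bytes are zeroed.
         */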
        chim = (uint8_t *)pkt + pkt->len;

        txq->agg_pktleft--;
        txq->agg_szleft -= pktsize;
        if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
            /*
             * Probably can't aggregate more packets,
             * flush this aggregating txdesc proactively.
             */
            txq->agg_pktleft = 0;
        }
    } else {
        agg_txd = hn_new_txd(hv, txq);
        if (!agg_txd)
            return NULL;

        chim = (uint8_t *)hv->chim_res->addr
            + agg_txd->chim_index * hv->chim_szmax;

        txq->agg_txd = agg_txd;
        txq->agg_pktleft = txq->agg_pktmax - 1;
        txq->agg_szleft = txq->agg_szmax - pktsize;
    }
    txq->agg_prevpkt = chim;

    return chim;
}
static inline void *
hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
                        uint32_t pi_dlen, uint32_t pi_type)
{
    const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
    struct rndis_pktinfo *pi;

    /*
     * Per-packet-info does not move; it only grows.
     *
     * NOTE:
     * pktinfooffset in this phase counts from the beginning
     * of rndis_packet_msg.
     */
    pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));

    pkt->pktinfolen += pi_size;

    pi->size = pi_size;
    pi->type = pi_type;
    pi->offset = RNDIS_PKTINFO_OFFSET;

    return pi->data;
}
/* Put RNDIS header and packet info on packet */
static void hn_encap(struct rndis_packet_msg *pkt,
                     uint16_t queue_id,
                     const struct rte_mbuf *m)
{
    unsigned int hlen = m->l2_len + m->l3_len;
    uint32_t *pi_data;
    uint32_t pkt_hlen;

    pkt->type = RNDIS_PACKET_MSG;
    pkt->len = m->pkt_len;
    pkt->dataoffset = 0;
    pkt->datalen = m->pkt_len;
    pkt->oobdataoffset = 0;
    pkt->oobdatalen = 0;
    pkt->oobdataelements = 0;
    pkt->pktinfooffset = sizeof(*pkt);
    pkt->pktinfolen = 0;

    /*
     * Set the hash value for this packet to the queue_id so that the
     * TX done event for this packet arrives on the right channel.
     */
    pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
                                      NDIS_PKTINFO_TYPE_HASHVAL);
    *pi_data = queue_id;

    if (m->ol_flags & PKT_TX_VLAN_PKT) {
        pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
                                          NDIS_PKTINFO_TYPE_VLAN);
        *pi_data = m->vlan_tci;
    }

    if (m->ol_flags & PKT_TX_TCP_SEG) {
        pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
                                          NDIS_PKTINFO_TYPE_LSO);

        if (m->ol_flags & PKT_TX_IPV6) {
            *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
                                               m->tso_segsz);
        } else {
            *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
                                               m->tso_segsz);
        }
    } else if (m->ol_flags &
               (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
        pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
                                          NDIS_PKTINFO_TYPE_CSUM);
        *pi_data = 0;

        if (m->ol_flags & PKT_TX_IPV6)
            *pi_data |= NDIS_TXCSUM_INFO_IPV6;
        if (m->ol_flags & PKT_TX_IPV4) {
            *pi_data |= NDIS_TXCSUM_INFO_IPV4;

            if (m->ol_flags & PKT_TX_IP_CKSUM)
                *pi_data |= NDIS_TXCSUM_INFO_IPCS;
        }

        if (m->ol_flags & PKT_TX_TCP_CKSUM)
            *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
        else if (m->ol_flags & PKT_TX_UDP_CKSUM)
            *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
    }

    pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
    /* Fixup RNDIS packet message total length */
    pkt->len += pkt_hlen;

    /* Convert RNDIS packet message offsets */
    pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
    pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
}
/* How many scatter gather list elements are needed */
static unsigned int hn_get_slots(const struct rte_mbuf *m)
{
    unsigned int slots = 1; /* for RNDIS header */

    while (m) {
        unsigned int size = rte_pktmbuf_data_len(m);
        unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;

        slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
        m = m->next;
    }

    return slots;
}
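/*
 * Example: a 200-byte segment whose IOVA starts 96 bytes before a 4K
 * page boundary (offs = 4000) spans two pages, so it needs two GPA
 * slots: (4000 + 200 + 4095) / 4096 = 2.
 */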
/* Build scatter gather list from chained mbuf */
static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
                               const struct rte_mbuf *m)
{
    unsigned int segs = 0;

    while (m) {
        rte_iova_t addr = rte_mbuf_data_iova(m);
        unsigned int page = addr / PAGE_SIZE;
        unsigned int offset = addr & PAGE_MASK;
        unsigned int len = rte_pktmbuf_data_len(m);

        while (len > 0) {
            unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);

            sg[segs].page = page;
            sg[segs].ofs = offset;
            sg[segs].len = bytes;
            segs++;

            ++page;
            offset = 0;
            len -= bytes;
        }
        m = m->next;
    }

    return segs;
}
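/*
 * VMBus gather entries are page-granular (page number, offset, length),
 * so a segment that crosses a page boundary must be split into one entry
 * per page; the offset is only non-zero for the first page of a segment.
 */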
/* Transmit directly from mbuf */
static int hn_xmit_sg(struct hn_tx_queue *txq,
                      const struct hn_txdesc *txd, const struct rte_mbuf *m,
                      bool *need_sig)
{
    struct vmbus_gpa sg[hn_get_slots(m)];
    struct hn_nvs_rndis nvs_rndis = {
        .type = NVS_TYPE_RNDIS,
        .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
        .chim_sz = txd->chim_size,
    };
    rte_iova_t addr;
    unsigned int segs;

    /* attach aggregation data if present */
    if (txd->chim_size > 0)
        nvs_rndis.chim_idx = txd->chim_index;
    else
        nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;

    hn_rndis_dump(txd->rndis_pkt);

    /* pass IOVA of rndis header in first segment */
    addr = rte_malloc_virt2iova(txd->rndis_pkt);
    if (unlikely(addr == RTE_BAD_IOVA)) {
        PMD_DRV_LOG(ERR, "RNDIS transmit cannot get iova");
        return -EINVAL;
    }

    sg[0].page = addr / PAGE_SIZE;
    sg[0].ofs = addr & PAGE_MASK;
    sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
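    /*
     * The header never straddles a page: hn_txd_init() allocates it
     * aligned to rte_align32pow2(HN_RNDIS_PKT_LEN), so a single gather
     * entry is always enough for it.
     */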
    segs = 1;

    hn_update_packet_stats(&txq->stats, m);

    segs += hn_fill_sg(sg + 1, m);

    PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
               txq->port_id, txq->queue_id, txd->chim_index,
               segs, nvs_rndis.chim_sz);

    return hn_nvs_send_sglist(txq->chan, sg, segs,
                              &nvs_rndis, sizeof(nvs_rndis),
                              (uintptr_t)txd, need_sig);
}
uint16_t
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
    struct hn_tx_queue *txq = ptxq;
    struct hn_data *hv = txq->hv;
    bool need_sig = false;
    uint16_t nb_tx;
    int ret;

    if (unlikely(hv->closed))
        return 0;

    if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
        hn_process_events(hv, txq->queue_id);

    for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
        struct rte_mbuf *m = tx_pkts[nb_tx];
        uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
        struct rndis_packet_msg *pkt;

        /* Aggregate small packets in the chimney buffer */
        if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
            /* If this packet will not fit, then flush */
            if (txq->agg_pktleft == 0 ||
                RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
                if (hn_flush_txagg(txq, &need_sig))
                    goto fail;
            }

            pkt = hn_try_txagg(hv, txq, pkt_size);
            if (unlikely(!pkt))
                break;

            hn_encap(pkt, txq->queue_id, m);
            hn_append_to_chim(txq, pkt, m);

            rte_pktmbuf_free(m);

            /* if buffer is full, flush */
            if (txq->agg_pktleft == 0 &&
                hn_flush_txagg(txq, &need_sig))
                goto fail;
        } else {
            struct hn_txdesc *txd;

            /* can send chimney data and large packet at once */
            txd = txq->agg_txd;
            if (txd) {
                hn_reset_txagg(txq);
            } else {
                txd = hn_new_txd(hv, txq);
                if (!txd)
                    break;
            }

            pkt = txd->rndis_pkt;
            txd->m = m;
            txd->data_size += m->pkt_len;
            ++txd->packets;

            hn_encap(pkt, txq->queue_id, m);

            ret = hn_xmit_sg(txq, txd, m, &need_sig);
            if (unlikely(ret != 0)) {
                PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
                ++txq->stats.errors;
                rte_mempool_put(hv->tx_pool, txd);
                goto fail;
            }
        }
    }

    /* If a partial buffer is left, try to send it.
     * If that fails, reuse it on the next send.
     */
    hn_flush_txagg(txq, &need_sig);

fail:
    if (need_sig)
        rte_vmbus_chan_signal_tx(txq->chan);

    return nb_tx;
}
uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
    struct hn_rx_queue *rxq = prxq;
    struct hn_data *hv = rxq->hv;

    if (unlikely(hv->closed))
        return 0;

    /* Get all outstanding receive completions */
    hn_process_events(hv, rxq->queue_id);

    /* Get mbufs off staging ring */
    return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
                                     nb_pkts, NULL);
}