/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <inttypes.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus_vmbus.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
			    DEV_TX_OFFLOAD_TCP_TSO    | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP)

int hn_logtype_init;
int hn_logtype_driver;

struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
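
/*
 * Per-queue statistics exposed as xstats. Each entry names one counter
 * and records its byte offset inside struct hn_stats; the same table is
 * used both to build the "tx_qN_*"/"rx_qN_*" names and to read the
 * values out of each queue's stats block.
 */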
static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets",           offsetof(struct hn_stats, packets) },
	{ "good_bytes",             offsetof(struct hn_stats, bytes) },
	{ "errors",                 offsetof(struct hn_stats, errors) },
	{ "allocation_failed",      offsetof(struct hn_stats, nomemory) },
	{ "multicast_packets",      offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets",      offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets",      offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets",        offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets",    offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets",   offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets",   offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets",  offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets",  offsetof(struct hn_stats, size_bins[7]) },
};
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
	struct rte_eth_dev *eth_dev;
	const char *name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
			return NULL;
		}

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE,
						   dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);
				return NULL;
			}
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not attach secondary");
			return NULL;
		}
	}

	eth_dev->device = &dev->device;
	eth_dev->intr_handle = &dev->intr_handle;

	return eth_dev;
}

static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->data->dev_private = NULL;

	/*
	 * Secondary process will check the name to attach.
	 * Clear this field to avoid attaching to a released port.
	 */
	eth_dev->data->name[0] = '\0';

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
}

/* handle "latency=X" from devargs */
static int hn_set_latency(const char *key, const char *value, void *opaque)
{
	struct hn_data *hv = opaque;
	char *endp = NULL;
	unsigned long lat;

	lat = strtoul(value, &endp, 0);
	if (*value == '\0' || *endp != '\0') {
		PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);

	hv->latency = lat * 1000;	/* usec to nsec */
	return 0;
}
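
/*
 * The "latency" device argument is given in microseconds, e.g.
 * "latency=20" (illustrative value) requests 20 usec of host interrupt
 * moderation; it is converted to nanoseconds above because that is the
 * unit rte_vmbus_set_latency() expects.
 */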

/* Parse device arguments */
static int hn_parse_args(const struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_devargs *devargs = dev->device->devargs;
	static const char * const valid_keys[] = {
		"latency",
		NULL
	};
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	PMD_INIT_LOG(DEBUG, "device args %s %s",
		     devargs->name, devargs->args);

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (!kvlist) {
		PMD_DRV_LOG(NOTICE, "invalid parameters");
		return -EINVAL;
	}

	rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
	rte_kvargs_free(kvlist);
	return 0;
}

/* Update link status.
 * Note: the DPDK definition of "wait_to_complete"
 *   means block this call until the link is up,
 *   which is not worth supporting.
 */
static int
hn_dev_link_update(struct rte_eth_dev *dev,
		   __rte_unused int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;
	int error;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);
	if (error)
		return error;

	hn_rndis_get_linkspeed(hv);

	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_FIXED,
		.link_speed = hv->link_speed / 10000,
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
}

static void hn_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info)
{
	struct hn_data *hv = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads =
		ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;

	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	hn_rndis_get_offload(hv, dev_info);
}
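
/*
 * Receive filter helpers: promiscuous and all-multicast mode are
 * implemented by reprogramming the RNDIS packet filter on the host.
 * Disabling a mode restores the directed + broadcast filter, keeping
 * all-multicast if it is still configured.
 */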

static void
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
}

static void
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t filter;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);
}

static void
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);
}

static void
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
}

/* Open the requested number of VMBus subchannels (one per extra queue) */
static int hn_subchan_configure(struct hn_data *hv,
				uint32_t subchan)
{
	struct vmbus_channel *primary = hn_primary_chan(hv);
	unsigned int retry = 0;
	int err;

	PMD_DRV_LOG(DEBUG,
		    "open %u subchannels", subchan);

	/* Send create sub channels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
	if (err)
		return err;

	while (subchan > 0) {
		struct vmbus_channel *new_sc;
		uint16_t chn_index;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */
			rte_delay_ms(10);
			continue;
		}

		if (err) {
			PMD_DRV_LOG(ERR,
				    "open subchannel failed: %d", err);
			return err;
		}

		rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);

		retry = 0;
		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			PMD_DRV_LOG(ERR,
				    "Invalid subchannel offermsg channel %u",
				    chn_index);
			return -EIO;
		}

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
		--subchan;
	}

	return 0;
}
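
/*
 * Configure the device: queue 0 always lives on the primary VMBus
 * channel, so only max(nb_rx_queues, nb_tx_queues) - 1 subchannels are
 * requested, and RSS is only programmed when extra queues exist.
 */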
static int hn_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
	const struct rte_eth_rss_conf *rss_conf =
		&dev_conf->rx_adv_conf.rss_conf;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;
	int err, subchan;

	PMD_INIT_FUNC_TRACE();

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported TX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported RX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	err = hn_rndis_conf_offload(hv, txmode->offloads,
				    rxmode->offloads);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "offload configure failed");
		return err;
	}

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);
	subchan = hv->num_queues - 1;
	if (subchan > 0) {
		err = hn_subchan_configure(hv, subchan);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "subchannel configuration failed");
			return err;
		}

		err = hn_rndis_conf_rss(hv, rss_conf);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "rss configuration failed");
			return err;
		}
	}

	return 0;
}

static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors + txq->stats.nomemory;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}

static void
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;
		memset(&txq->stats, 0, sizeof(struct hn_stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;
		memset(&rxq->stats, 0, sizeof(struct hn_stats));
	}
}
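
/*
 * Extended statistics: one counter per entry of hn_stat_strings for
 * every configured queue, reported as "tx_q<N>_<name>" and
 * "rx_q<N>_<name>" (for example "rx_q0_good_packets").
 */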
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			__rte_unused unsigned int limit)
{
	unsigned int i, t, count = 0;

	PMD_INIT_FUNC_TRACE();

	if (!xstats_names)
		return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
			+ dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);

	/* Note: limit checked in rte_eth_xstats_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
	}

	return count;
}

static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int i, t, count = 0;
	const unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
		+ dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
	const char *stats;

	PMD_INIT_FUNC_TRACE();

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
	}

	return count;
}

static int
hn_dev_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		PMD_DRV_LOG(ERR, "link status not supported yet");
		return -ENOTSUP;
	}

	return hn_rndis_set_rxfilter(hv,
				     NDIS_PACKET_TYPE_BROADCAST |
				     NDIS_PACKET_TYPE_ALL_MULTICAST |
				     NDIS_PACKET_TYPE_DIRECTED);
}

static void
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	hn_rndis_set_rxfilter(hv, 0);
}

static void
hn_dev_close(struct rte_eth_dev *dev __rte_unused)
{
	PMD_INIT_LOG(DEBUG, "close");
}

static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure = hn_dev_configure,
	.dev_start = hn_dev_start,
	.dev_stop = hn_dev_stop,
	.dev_close = hn_dev_close,
	.dev_infos_get = hn_dev_info_get,
	.txq_info_get = hn_dev_tx_queue_info,
	.rxq_info_get = hn_dev_rx_queue_info,
	.promiscuous_enable = hn_dev_promiscuous_enable,
	.promiscuous_disable = hn_dev_promiscuous_disable,
	.allmulticast_enable = hn_dev_allmulticast_enable,
	.allmulticast_disable = hn_dev_allmulticast_disable,
	.tx_queue_setup = hn_dev_tx_queue_setup,
	.tx_queue_release = hn_dev_tx_queue_release,
	.tx_done_cleanup = hn_dev_tx_done_cleanup,
	.rx_queue_setup = hn_dev_rx_queue_setup,
	.rx_queue_release = hn_dev_rx_queue_release,
	.link_update = hn_dev_link_update,
	.stats_get = hn_dev_stats_get,
	.xstats_get = hn_dev_xstats_get,
	.xstats_get_names = hn_dev_xstats_get_names,
	.stats_reset = hn_dev_stats_reset,
	.xstats_reset = hn_dev_stats_reset,
};

/*
 * Setup connection between PMD and the Hyper-V host (NVS and RNDIS).
 */
static int
hn_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	error = hn_nvs_attach(hv, mtu);
	if (error)
		goto failed_nvs;

	error = hn_rndis_attach(hv);
	if (error)
		goto failed_rndis;

	/*
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after the successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
	return 0;

failed_rndis:
	hn_nvs_detach(hv);
failed_nvs:
	return error;
}

static void
hn_detach(struct hn_data *hv)
{
	hn_rndis_detach(hv);
	hn_nvs_detach(hv);
}
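
/*
 * Device init: set up burst functions and ops, then (primary process
 * only) parse devargs, open the primary VMBus channel, attach NVS and
 * RNDIS, read the MAC address, and size max_queues from the RSS
 * capabilities and the VMBus channel limit.
 */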
static int
eth_hn_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;
	int err, max_chan;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Since Hyper-V only supports one MAC address, just use local data */
	eth_dev->data->mac_addrs = &hv->mac_addr;

	hv->vmbus = vmbus;
	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
	hv->port_id = eth_dev->data->port_id;
	hv->latency = HN_CHAN_LATENCY_NS;

	err = hn_parse_args(eth_dev);
	if (err)
		return err;

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
	if (err)
		return err;

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);
	if (!hv->primary)
		return -ENOMEM;

	err = hn_attach(hv, ETHER_MTU);
	if (err)
		goto failed;

	err = hn_tx_pool_init(eth_dev);
	if (err)
		goto failed;

	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
	if (err)
		goto failed;

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
	if (max_chan <= 0) {
		err = -ENODEV;
		goto failed;
	}

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
		rxr_cnt = 1;

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	return 0;

failed:
	PMD_INIT_LOG(NOTICE, "device init failed");
	hn_detach(hv);
	return err;
}

static int
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	hn_detach(hv);
	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
	if (!eth_dev)
		return -ENOMEM;

	ret = eth_hn_dev_init(eth_dev);
	if (ret)
		eth_dev_vmbus_release(eth_dev);
	else
		rte_eth_dev_probing_finish(eth_dev);

	return ret;
}

static int eth_hn_remove(struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
		return -ENODEV;

	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
		return ret;

	eth_dev_vmbus_release(eth_dev);
	return 0;
}
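
/*
 * The VMBus bus driver matches devices by class GUID; the GUID below is
 * the Hyper-V synthetic network device class, so this PMD is probed for
 * every netvsc NIC offered to the guest.
 */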
/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/*  f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
	{ 0 }
};

static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};

RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");

RTE_INIT(hn_init_log);
static void
hn_init_log(void)
{
	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
	if (hn_logtype_init >= 0)
		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
	if (hn_logtype_driver >= 0)
		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
}