/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
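/* Offload masks accepted by this PMD; hn_dev_configure() below flags any
 * requested offload outside these masks as unsupported.
 */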
#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM | \
			    DEV_TX_OFFLOAD_UDP_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_TSO | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP)
int hn_logtype_init;
int hn_logtype_driver;
struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets", offsetof(struct hn_stats, packets) },
	{ "good_bytes", offsetof(struct hn_stats, bytes) },
	{ "errors", offsetof(struct hn_stats, errors) },
	{ "allocation_failed", offsetof(struct hn_stats, nomemory) },
	{ "multicast_packets", offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
};
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
	struct rte_eth_dev *eth_dev;

	name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE, dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);

		eth_dev = rte_eth_dev_attach_secondary(name);
			PMD_DRV_LOG(NOTICE, "can not attach secondary");

	eth_dev->device = &dev->device;
	eth_dev->intr_handle = &dev->intr_handle;
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->data->dev_private = NULL;

	/*
	 * The secondary process looks up the port by this name when attaching.
	 * Clear the field to avoid attaching to a released port.
	 */
	eth_dev->data->name[0] = '\0';

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
/* Update link status.
 * Note: the DPDK meaning of "wait_to_complete" is to block this call
 * until the link is up, which is not worth supporting here.
 */
hn_dev_link_update(struct rte_eth_dev *dev,
		   __rte_unused int wait_to_complete)
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);

	hn_rndis_get_linkspeed(hv);
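	/* hn_rndis_get_linkspeed() fills hv->link_speed from the RNDIS link
	 * speed OID, which reports speed in units of 100 bps; dividing by
	 * 10,000 below converts that into the Mbps value rte_eth_link expects.
	 */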
	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_SPEED_FIXED,
		.link_speed = hv->link_speed / 10000,
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
static void hn_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info)
	struct hn_data *hv = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads =
		ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;
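	/* Hyper-V RSS uses Toeplitz hashing; NDIS_HASH_KEYSIZE_TOEPLITZ is
	 * the usual 40-byte Toeplitz hash key length.
	 */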
	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	hn_rndis_get_offload(hv, dev_info);
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);

hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);

hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);

hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
/* Set up shared rx/tx queue data */
static int hn_subchan_configure(struct hn_data *hv,
	struct vmbus_channel *primary = hn_primary_chan(hv);
	unsigned int retry = 0;

		    "open %u subchannels", subchan);

	/* Send the create-subchannels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
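	/* The host may grant fewer subchannels than requested; subchan is
	 * passed by reference so the granted count can drive the open loop
	 * below.
	 */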
	while (subchan > 0) {
		struct vmbus_channel *new_sc;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */

			    "open subchannel failed: %d", err);

		rte_vmbus_set_latency(hv->vmbus, new_sc,

		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			    "Invalid subchannel offermsg channel %u",

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
static int hn_dev_configure(struct rte_eth_dev *dev)
	const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
	const struct rte_eth_rss_conf *rss_conf =
		&dev_conf->rx_adv_conf.rss_conf;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;

	PMD_INIT_FUNC_TRACE();

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
			    "unsupported TX offload: %#" PRIx64,

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
			    "unsupported RX offload: %#" PRIx64,

	err = hn_rndis_conf_offload(hv, txmode->offloads,
			    "offload configure failed");

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);
	subchan = hv->num_queues - 1;
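	/* The primary VMBus channel already carries queue 0, so only the
	 * remaining (num_queues - 1) queues need host-allocated subchannels.
	 */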
	err = hn_subchan_configure(hv, subchan);
			    "subchannel configuration failed");

	err = hn_rndis_conf_rss(hv, rss_conf);
			    "rss configuration failed");
static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors + txq->stats.nomemory;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
hn_dev_stats_reset(struct rte_eth_dev *dev)
	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		memset(&txq->stats, 0, sizeof(struct hn_stats));

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		memset(&rxq->stats, 0, sizeof(struct hn_stats));
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			__rte_unused unsigned int limit)
	unsigned int i, t, count = 0;

	PMD_INIT_FUNC_TRACE();

		return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
			+ dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
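	/* rte_eth_xstats_get_names() can call in with a NULL xstats_names
	 * array just to learn the required size: one entry per
	 * hn_stat_strings element for every Tx and every Rx queue.
	 */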
	/* Note: limit checked in rte_eth_xstats_get_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
	unsigned int i, t, count = 0;
	const unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
		+ dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
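	/* Values must be emitted in exactly the order used by
	 * hn_dev_xstats_get_names(): all Tx queues first, then all Rx queues,
	 * each walking hn_stat_strings in table order.
	 */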
	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			xstats[count++].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
hn_dev_start(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		PMD_DRV_LOG(ERR, "link status not supported yet");

	return hn_rndis_set_rxfilter(hv,
				     NDIS_PACKET_TYPE_BROADCAST |
				     NDIS_PACKET_TYPE_ALL_MULTICAST |
				     NDIS_PACKET_TYPE_DIRECTED);
hn_dev_stop(struct rte_eth_dev *dev)
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	hn_rndis_set_rxfilter(hv, 0);

hn_dev_close(struct rte_eth_dev *dev __rte_unused)
	PMD_INIT_LOG(DEBUG, "close");
static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure = hn_dev_configure,
	.dev_start = hn_dev_start,
	.dev_stop = hn_dev_stop,
	.dev_close = hn_dev_close,
	.dev_infos_get = hn_dev_info_get,
	.txq_info_get = hn_dev_tx_queue_info,
	.rxq_info_get = hn_dev_rx_queue_info,
	.promiscuous_enable = hn_dev_promiscuous_enable,
	.promiscuous_disable = hn_dev_promiscuous_disable,
	.allmulticast_enable = hn_dev_allmulticast_enable,
	.allmulticast_disable = hn_dev_allmulticast_disable,
	.tx_queue_setup = hn_dev_tx_queue_setup,
	.tx_queue_release = hn_dev_tx_queue_release,
	.tx_done_cleanup = hn_dev_tx_done_cleanup,
	.rx_queue_setup = hn_dev_rx_queue_setup,
	.rx_queue_release = hn_dev_rx_queue_release,
	.link_update = hn_dev_link_update,
	.stats_get = hn_dev_stats_get,
	.xstats_get = hn_dev_xstats_get,
	.xstats_get_names = hn_dev_xstats_get_names,
	.stats_reset = hn_dev_stats_reset,
	.xstats_reset = hn_dev_stats_reset,
};
/*
 * Set up the connection between the PMD and the kernel.
 */
hn_attach(struct hn_data *hv, unsigned int mtu)
	error = hn_nvs_attach(hv, mtu);

	error = hn_rndis_attach(hv);

	/*
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
hn_detach(struct hn_data *hv)

eth_hn_dev_init(struct rte_eth_dev *eth_dev)
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * For secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Since Hyper-V only supports one MAC address, just use local data */
	eth_dev->data->mac_addrs = &hv->mac_addr;

	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
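	/* rxbuf_res and chim_res map the host-supplied receive buffer and the
	 * "chimney" send buffer regions that the VMBus bus driver exposes as
	 * device resources.
	 */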
	hv->port_id = eth_dev->data->port_id;

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0],

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);

	err = hn_attach(hv, ETHER_MTU);

	err = hn_tx_pool_init(eth_dev);

	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	PMD_INIT_LOG(NOTICE, "device init failed");
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
	struct hn_data *hv = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);

	eth_dev->data->mac_addrs = NULL;
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));

	ret = eth_hn_dev_init(eth_dev);
		eth_dev_vmbus_release(eth_dev);
		rte_eth_dev_probing_finish(eth_dev);
static int eth_hn_remove(struct rte_vmbus_device *dev)
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);

	ret = eth_hn_dev_uninit(eth_dev);

	eth_dev_vmbus_release(eth_dev);
/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/* f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
};
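/* That UUID is the class ID Hyper-V assigns to synthetic network adapters;
 * the VMBus bus layer matches offered devices against this id_table when
 * binding them to the PMD.
 */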
static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};

RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
RTE_INIT(hn_init_log);
static void
hn_init_log(void)
{
	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
	if (hn_logtype_init >= 0)
		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
	if (hn_logtype_driver >= 0)
		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
}