/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <inttypes.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus_vmbus.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"

#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
			    DEV_TX_OFFLOAD_TCP_TSO    | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP | \
			    DEV_RX_OFFLOAD_RSS_HASH)

#define NETVSC_ARG_LATENCY "latency"
#define NETVSC_ARG_RXBREAK "rx_copybreak"
#define NETVSC_ARG_TXBREAK "tx_copybreak"
#define NETVSC_ARG_RX_EXTMBUF_ENABLE "rx_extmbuf_enable"
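
/* These keys are passed as VMBus device arguments. Illustrative usage
 * (hypothetical device UUID and example values; every key is optional):
 *   -a 7a08391f-f5a0-4ac0-9802-d13fd964f8df,latency=10,rx_copybreak=256
 */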

/* The max number of retries when hot adding a VF device */
#define NETVSC_MAX_HOTADD_RETRY 10

struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
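
/* Per-queue counters exposed as xstats; "offset" locates each counter
 * inside struct hn_stats. size_bins[] is a frame-size histogram: bin 0
 * counts undersized frames, bins 1..7 cover 64 up to 1519+ bytes.
 */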
static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets", offsetof(struct hn_stats, packets) },
	{ "good_bytes", offsetof(struct hn_stats, bytes) },
	{ "errors", offsetof(struct hn_stats, errors) },
	{ "ring full", offsetof(struct hn_stats, ring_full) },
	{ "channel full", offsetof(struct hn_stats, channel_full) },
	{ "multicast_packets", offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets", offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
};

/* The default RSS key.
 * This value is the same as MLX5 so that flows will be
 * received on the same path for both VF and synthetic NIC.
 */
static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
	0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
};

static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
			return NULL;
		}

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE,
						   dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE, "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);
				return NULL;
			}
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not attach secondary");
			return NULL;
		}
	}

	eth_dev->device = &dev->device;

	/* interrupt is simulated */
	dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	eth_dev->intr_handle = &dev->intr_handle;

	return eth_dev;
}

static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
}

static int hn_set_parameter(const char *key, const char *value, void *opaque)
{
	struct hn_data *hv = opaque;
	char *endp = NULL;
	unsigned long v;

	v = strtoul(value, &endp, 0);
	if (*value == '\0' || *endp != '\0') {
		PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
		return -EINVAL;
	}

	if (!strcmp(key, NETVSC_ARG_LATENCY)) {
		/* usec to nsec */
		hv->latency = v * 1000;
		PMD_DRV_LOG(DEBUG, "set latency %lu usec", v);
	} else if (!strcmp(key, NETVSC_ARG_RXBREAK)) {
		hv->rx_copybreak = v;
		PMD_DRV_LOG(DEBUG, "rx copy break set to %u",
			    hv->rx_copybreak);
	} else if (!strcmp(key, NETVSC_ARG_TXBREAK)) {
		hv->tx_copybreak = v;
		PMD_DRV_LOG(DEBUG, "tx copy break set to %u",
			    hv->tx_copybreak);
	} else if (!strcmp(key, NETVSC_ARG_RX_EXTMBUF_ENABLE)) {
		hv->rx_extmbuf_enable = v;
		PMD_DRV_LOG(DEBUG, "rx extmbuf enable set to %u",
			    hv->rx_extmbuf_enable);
	}

	return 0;
}

/* Parse device arguments */
static int hn_parse_args(const struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_devargs *devargs = dev->device->devargs;
	static const char * const valid_keys[] = {
		NETVSC_ARG_LATENCY,
		NETVSC_ARG_RXBREAK,
		NETVSC_ARG_TXBREAK,
		NETVSC_ARG_RX_EXTMBUF_ENABLE,
		NULL
	};
	struct rte_kvargs *kvlist;
	int ret;

	if (!devargs)
		return 0;

	PMD_INIT_LOG(DEBUG, "device args %s %s",
		     devargs->name, devargs->args);

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (!kvlist) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	ret = rte_kvargs_process(kvlist, NULL, hn_set_parameter, hv);
	rte_kvargs_free(kvlist);
	return ret;
}

/* Update link status.
 * Note: the DPDK definition of "wait_to_complete"
 * means block this call until link is up,
 * which is not worth supporting here.
 */
static int
hn_dev_link_update(struct rte_eth_dev *dev,
		   int wait_to_complete __rte_unused)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;
	int error;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);
	if (error)
		return error;

	hn_rndis_get_linkspeed(hv);

	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_SPEED_FIXED,
		.link_speed = hv->link_speed / 10000,
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
}

static int hn_dev_info_get(struct rte_eth_dev *dev,
			   struct rte_eth_dev_info *dev_info)
{
	struct hn_data *hv = dev->data->dev_private;
	int rc;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads = hv->rss_offloads;
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;

	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_max = 4096;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* fills in rx and tx offload capability */
	rc = hn_rndis_get_offload(hv, dev_info);
	if (rc != 0)
		return rc;

	/* merges the offload and queues of vf */
	return hn_vf_info_get(hv, dev_info);
}
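
/* Reprogram the RSS indirection table. RSS is disabled first and then
 * re-enabled with the new table; the update is forwarded to an attached
 * VF at the end so both paths hash identically.
 */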
static int hn_rss_reta_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	unsigned int i;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != NDIS_HASH_INDCNT) {
		PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
		return -EINVAL;
	}

	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
		uint64_t mask = (uint64_t)1 << shift;

		if (reta_conf[idx].mask & mask)
			hv->rss_ind[i] = reta_conf[idx].reta[shift];
	}

	err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "rss disable failed");
		return err;
	}

	err = hn_rndis_conf_rss(hv, 0);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "reta reconfig failed");
		return err;
	}

	return hn_vf_reta_hash_update(dev, reta_conf, reta_size);
}

static int hn_rss_reta_query(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != NDIS_HASH_INDCNT) {
		PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
		return -EINVAL;
	}

	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
		uint64_t mask = (uint64_t)1 << shift;

		if (reta_conf[idx].mask & mask)
			reta_conf[idx].reta[shift] = hv->rss_ind[i];
	}

	return 0;
}

static void hn_rss_hash_init(struct hn_data *hv,
			     const struct rte_eth_rss_conf *rss_conf)
{
	/* Convert from DPDK RSS hash flags to NDIS hash flags */
	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hv->rss_hash |= NDIS_HASH_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hv->rss_hash |= NDIS_HASH_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
		hv->rss_hash |= NDIS_HASH_IPV6_EX;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;

	/* Use the default key if the application did not supply one */
	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
	       NDIS_HASH_KEYSIZE_TOEPLITZ);
}

static int hn_rss_hash_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int err;

	PMD_INIT_FUNC_TRACE();

	err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "rss disable failed");
		return err;
	}

	hn_rss_hash_init(hv, rss_conf);

	if (rss_conf->rss_hf != 0) {
		err = hn_rndis_conf_rss(hv, 0);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "rss reconfig failed (RSS disabled)");
			return err;
		}
	}

	return hn_vf_rss_hash_update(dev, rss_conf);
}

static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
				struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hv->ndis_ver < NDIS_VERSION_6_20) {
		PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
		return -EOPNOTSUPP;
	}

	rss_conf->rss_key_len = NDIS_HASH_KEYSIZE_TOEPLITZ;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, hv->rss_key,
		       NDIS_HASH_KEYSIZE_TOEPLITZ);

	rss_conf->rss_hf = 0;
	if (hv->rss_hash & NDIS_HASH_IPV4)
		rss_conf->rss_hf |= ETH_RSS_IPV4;

	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if (hv->rss_hash & NDIS_HASH_IPV6)
		rss_conf->rss_hf |= ETH_RSS_IPV6;

	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;

	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;

	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;

	return 0;
}

static int
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
	return hn_vf_promiscuous_enable(dev);
}

static int
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t filter;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);
	return hn_vf_promiscuous_disable(dev);
}

static int
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);
	return hn_vf_allmulticast_enable(dev);
}

static int
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
	return hn_vf_allmulticast_disable(dev);
}

static int
hn_dev_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set,
		    uint32_t nb_mc_addr)
{
	/* No filtering on the synthetic path, but can do it on VF */
	return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}

/* Setup shared rx/tx queue data */
static int hn_subchan_configure(struct hn_data *hv,
				uint32_t subchan)
{
	struct vmbus_channel *primary = hn_primary_chan(hv);
	int err;
	unsigned int retry = 0;

	PMD_DRV_LOG(DEBUG,
		    "open %u subchannels", subchan);

	/* Send create sub channels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
	if (err)
		return err;

	while (subchan > 0) {
		struct vmbus_channel *new_sc;
		uint16_t chn_index;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */
			rte_delay_ms(10);
			continue;
		}

		if (err) {
			PMD_DRV_LOG(ERR,
				    "open subchannel failed: %d", err);
			return err;
		}

		rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);

		retry = 0;
		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			PMD_DRV_LOG(ERR,
				    "Invalid subchannel offermsg channel %u",
				    chn_index);
			return -EIO;
		}

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
		--subchan;
	}

	return err;
}
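
/* Alarm callback for VF hot add: scan /sys/bus/pci/devices/<addr>/net for
 * a netdev whose MAC address matches the synthetic NIC and, when found,
 * hot-plug it into EAL; otherwise re-arm the alarm, giving up after
 * NETVSC_MAX_HOTADD_RETRY attempts.
 */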
static void netvsc_hotplug_retry(void *args)
{
	int ret;
	struct hn_data *hv = args;
	struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
	struct rte_devargs *d = &hv->devargs;
	char buf[256];

	DIR *di;
	struct dirent *dir;
	struct ifreq req;
	struct rte_ether_addr eth_addr;
	int s;

	PMD_DRV_LOG(DEBUG, "%s: retry count %d",
		    __func__, hv->eal_hot_plug_retry);

	if (hv->eal_hot_plug_retry++ > NETVSC_MAX_HOTADD_RETRY)
		return;

	snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/net", d->name);
	di = opendir(buf);
	if (!di) {
		PMD_DRV_LOG(DEBUG, "%s: can't open directory %s, "
			    "retrying in 1 second", __func__, buf);
		goto retry;
	}

	while ((dir = readdir(di))) {
		/* Skip . and .. directories */
		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;

		/* trying to get mac address if this is a network device */
		s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
		if (s == -1) {
			PMD_DRV_LOG(ERR, "Failed to create socket errno %d",
				    errno);
			break;
		}
		strlcpy(req.ifr_name, dir->d_name, sizeof(req.ifr_name));
		ret = ioctl(s, SIOCGIFHWADDR, &req);
		close(s);
		if (ret < 0) {
			PMD_DRV_LOG(ERR,
				    "Failed to send SIOCGIFHWADDR for device %s",
				    dir->d_name);
			break;
		}
		if (req.ifr_hwaddr.sa_family != ARPHRD_ETHER) {
			closedir(di);
			return;
		}
		memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
		       RTE_DIM(eth_addr.addr_bytes));

		if (rte_is_same_ether_addr(&eth_addr, dev->data->mac_addrs)) {
			PMD_DRV_LOG(NOTICE,
				    "Found matching MAC address, adding device %s network name %s",
				    d->name, dir->d_name);
			ret = rte_eal_hotplug_add(d->bus->name, d->name,
						  d->args);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Failed to add PCI device %s",
					    d->name);
				break;
			}
		}
		/* When the code reaches here, we either have already added
		 * the device, or its MAC address did not match.
		 */
		closedir(di);
		return;
	}
	closedir(di);
retry:
	/* The device is still being initialized, retry after 1 second */
	rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hv);
}

static void
netvsc_hotadd_callback(const char *device_name, enum rte_dev_event_type type,
		       void *arg)
{
	struct hn_data *hv = arg;
	struct rte_devargs *d = &hv->devargs;
	int ret;

	PMD_DRV_LOG(INFO, "Device notification type=%d device_name=%s",
		    type, device_name);

	switch (type) {
	case RTE_DEV_EVENT_ADD:
		/* if we already have a VF, don't check on hot add */
		if (hv->vf_ctx.vf_state > vf_removed)
			break;

		ret = rte_devargs_parse(d, device_name);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "devargs parsing failed ret=%d", ret);
			return;
		}

		if (!strcmp(d->bus->name, "pci")) {
			/* Start the process of figuring out if this
			 * PCI device is a VF device
			 */
			hv->eal_hot_plug_retry = 0;
			rte_eal_alarm_set(1000000, netvsc_hotplug_retry, hv);
		}

		/* We will switch to VF on RNDIS configure message
		 * sent from VSP
		 */
		break;
	default:
		break;
	}
}
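
/* Ethdev configure: validate the requested offloads, open one VMBus
 * sub-channel per additional queue, and program the initial RSS state.
 */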
static int hn_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct rte_eth_rss_conf *rss_conf = &dev_conf->rx_adv_conf.rss_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;
	int i, err, subchan;

	PMD_INIT_FUNC_TRACE();

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported TX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE,
			    "unsupported RX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	err = hn_rndis_conf_offload(hv, txmode->offloads,
				    rxmode->offloads);
	if (err) {
		PMD_DRV_LOG(NOTICE,
			    "offload configure failed");
		return err;
	}

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);

	for (i = 0; i < NDIS_HASH_INDCNT; i++)
		hv->rss_ind[i] = i % dev->data->nb_rx_queues;

	hn_rss_hash_init(hv, rss_conf);

	subchan = hv->num_queues - 1;
	if (subchan > 0) {
		err = hn_subchan_configure(hv, subchan);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "subchannel configuration failed");
			return err;
		}

		err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
		if (err) {
			PMD_DRV_LOG(NOTICE,
				    "rss disable failed");
			return err;
		}

		if (rss_conf->rss_hf != 0) {
			err = hn_rndis_conf_rss(hv, 0);
			if (err) {
				PMD_DRV_LOG(NOTICE,
					    "initial RSS config failed");
				return err;
			}
		}
	}

	return hn_vf_configure_locked(dev, dev_conf);
}

static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
{
	unsigned int i;

	hn_vf_stats_get(dev, stats);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->stats.ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}

static int
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (txq)
			memset(&txq->stats, 0, sizeof(struct hn_stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq)
			memset(&rxq->stats, 0, sizeof(struct hn_stats));
	}

	return 0;
}

static int
hn_dev_xstats_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = hn_dev_stats_reset(dev);
	if (ret != 0)
		return ret;

	return hn_vf_xstats_reset(dev);
}

static int
hn_dev_xstats_count(struct rte_eth_dev *dev)
{
	int ret, count;

	count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
	count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);

	ret = hn_vf_xstats_get_names(dev, NULL, 0);
	if (ret < 0)
		return ret;

	return count + ret;
}
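
/* xstats layout: per-queue counters for every TX queue first, then every
 * RX queue, followed by whatever counters the attached VF reports.
 */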
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int limit)
{
	unsigned int i, t, count = 0;
	int ret;

	if (!xstats_names)
		return hn_dev_xstats_count(dev);

	/* Note: limit checked in rte_eth_xstats_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
	}

	ret = hn_vf_xstats_get_names(dev, xstats_names + count,
				     limit - count);
	if (ret < 0)
		return ret;

	return count + ret;
}

static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int i, t, count = 0;
	const unsigned int nstats = hn_dev_xstats_count(dev);
	const char *stats;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
		}
	}

	ret = hn_vf_xstats_get(dev, xstats, count, n);
	if (ret < 0)
		return ret;

	return count + ret;
}

static int
hn_dev_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	int error;

	PMD_INIT_FUNC_TRACE();

	/* Register to monitor hot plug events */
	error = rte_dev_event_callback_register(NULL, netvsc_hotadd_callback,
						hv);
	if (error) {
		PMD_DRV_LOG(ERR, "failed to register device event callback");
		return error;
	}

	error = hn_rndis_set_rxfilter(hv,
				      NDIS_PACKET_TYPE_BROADCAST |
				      NDIS_PACKET_TYPE_ALL_MULTICAST |
				      NDIS_PACKET_TYPE_DIRECTED);
	if (error)
		return error;

	/* If the VF fails to start, undo the synthetic rxfilter */
	error = hn_vf_start(dev);
	if (error)
		hn_rndis_set_rxfilter(hv, 0);

	/* Initialize Link state */
	if (error == 0)
		hn_dev_link_update(dev, 0);

	return error;
}

static int
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;

	rte_dev_event_callback_unregister(NULL, netvsc_hotadd_callback, hv);
	hn_rndis_set_rxfilter(hv, 0);
	return hn_vf_stop(dev);
}

static int
hn_dev_close(struct rte_eth_dev *dev)
{
	int ret;
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* The hot-plug retry alarm was registered with hv as its argument */
	rte_eal_alarm_cancel(netvsc_hotplug_retry, hv);

	ret = hn_vf_close(dev);
	hn_dev_free_queues(dev);

	return ret;
}
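
/* Generic ethdev callbacks for the synthetic path; most handlers also
 * invoke the matching hn_vf_* helper so an attached VF stays in sync.
 */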
static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure		= hn_dev_configure,
	.dev_start		= hn_dev_start,
	.dev_stop		= hn_dev_stop,
	.dev_close		= hn_dev_close,
	.dev_infos_get		= hn_dev_info_get,
	.txq_info_get		= hn_dev_tx_queue_info,
	.rxq_info_get		= hn_dev_rx_queue_info,
	.dev_supported_ptypes_get = hn_vf_supported_ptypes,
	.promiscuous_enable	= hn_dev_promiscuous_enable,
	.promiscuous_disable	= hn_dev_promiscuous_disable,
	.allmulticast_enable	= hn_dev_allmulticast_enable,
	.allmulticast_disable	= hn_dev_allmulticast_disable,
	.set_mc_addr_list	= hn_dev_mc_addr_list,
	.reta_update		= hn_rss_reta_update,
	.reta_query		= hn_rss_reta_query,
	.rss_hash_update	= hn_rss_hash_update,
	.rss_hash_conf_get	= hn_rss_hash_conf_get,
	.tx_queue_setup		= hn_dev_tx_queue_setup,
	.tx_queue_release	= hn_dev_tx_queue_release,
	.tx_done_cleanup	= hn_dev_tx_done_cleanup,
	.rx_queue_setup		= hn_dev_rx_queue_setup,
	.rx_queue_release	= hn_dev_rx_queue_release,
	.link_update		= hn_dev_link_update,
	.stats_get		= hn_dev_stats_get,
	.stats_reset		= hn_dev_stats_reset,
	.xstats_get		= hn_dev_xstats_get,
	.xstats_get_names	= hn_dev_xstats_get_names,
	.xstats_reset		= hn_dev_xstats_reset,
};

/*
 * Setup connection between PMD and the Hyper-V host (NVS and RNDIS).
 */
static int
hn_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	error = hn_nvs_attach(hv, mtu);
	if (error)
		goto failed_nvs;

	error = hn_rndis_attach(hv);
	if (error)
		goto failed_rndis;

	/*
	 * NOTE:
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after the successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
	return 0;

failed_rndis:
	hn_nvs_detach(hv);
failed_nvs:
	return error;
}

static void
hn_detach(struct hn_data *hv)
{
	hn_nvs_detach(hv);
	hn_rndis_detach(hv);
}

static int
eth_hn_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;
	int err, max_chan;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->rx_queue_count = hn_dev_rx_queue_count;
	eth_dev->rx_descriptor_status = hn_dev_rx_queue_status;
	eth_dev->tx_descriptor_status = hn_dev_tx_descriptor_status;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Since Hyper-V only supports one MAC address */
	eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
					      sizeof(struct rte_ether_addr), 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory store MAC addresses");
		return -ENOMEM;
	}

	hv->vmbus = vmbus;
	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
	hv->port_id = eth_dev->data->port_id;
	hv->latency = HN_CHAN_LATENCY_NS;
	hv->rx_copybreak = HN_RXCOPY_THRESHOLD;
	hv->tx_copybreak = HN_TXCOPY_THRESHOLD;
	hv->rx_extmbuf_enable = HN_RX_EXTMBUF_ENABLE;
	hv->max_queues = 1;

	rte_rwlock_init(&hv->vf_lock);
	hv->vf_ctx.vf_vsc_switched = false;
	hv->vf_ctx.vf_vsp_reported = false;
	hv->vf_ctx.vf_attached = false;
	hv->vf_ctx.vf_state = vf_unknown;

	err = hn_parse_args(eth_dev);
	if (err)
		return err;

	strlcpy(hv->owner.name, eth_dev->device->name,
		RTE_ETH_MAX_OWNER_NAME_LEN);
	err = rte_eth_dev_owner_new(&hv->owner.id);
	if (err) {
		PMD_INIT_LOG(ERR, "Can not get owner id");
		return err;
	}

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
	if (err)
		return err;

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);
	if (!hv->primary)
		return -ENOMEM;

	err = hn_attach(hv, RTE_ETHER_MTU);
	if (err)
		goto failed;

	err = hn_chim_init(eth_dev);
	if (err)
		goto failed;

	err = hn_rndis_get_eaddr(hv, eth_dev->data->mac_addrs->addr_bytes);
	if (err)
		goto failed;

	/* Multi queue requires later versions of windows server */
	if (hv->nvs_ver < NVS_VERSION_5)
		return 0;

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
	if (max_chan <= 0)
		goto failed;

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
		rxr_cnt = 1;

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	/* If VF was reported but not added, do it now */
	if (hv->vf_ctx.vf_vsp_reported && !hv->vf_ctx.vf_vsc_switched) {
		PMD_INIT_LOG(DEBUG, "Adding VF device");

		err = hn_vf_add(eth_dev, hv);
		if (err)
			goto failed;
	}

	return 0;

failed:
	PMD_INIT_LOG(NOTICE, "device init failed");

	hn_chim_uninit(eth_dev);
	hn_detach(hv);
	return err;
}

static int
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	int ret, ret_stop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret_stop = hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	hn_detach(hv);
	hn_chim_uninit(eth_dev);
	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);
	ret = rte_eth_dev_owner_delete(hv->owner.id);
	if (ret != 0)
		return ret;

	return ret_stop;
}

static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = rte_dev_event_monitor_start();
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to start device event monitoring");
		return ret;
	}

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
	if (!eth_dev)
		return -ENOMEM;

	ret = eth_hn_dev_init(eth_dev);
	if (ret) {
		eth_dev_vmbus_release(eth_dev);
		rte_dev_event_monitor_stop();
	} else {
		rte_eth_dev_probing_finish(eth_dev);
	}

	return ret;
}

static int eth_hn_remove(struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
		return 0; /* port already released */

	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
		return ret;

	eth_dev_vmbus_release(eth_dev);
	rte_dev_event_monitor_stop();
	return 0;
}

/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/*  f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
};

static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};

RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
RTE_LOG_REGISTER(hn_logtype_init, pmd.net.netvsc.init, NOTICE);
RTE_LOG_REGISTER(hn_logtype_driver, pmd.net.netvsc.driver, NOTICE);
RTE_PMD_REGISTER_PARAM_STRING(net_netvsc,
			      NETVSC_ARG_LATENCY "=<uint32> "
			      NETVSC_ARG_RXBREAK "=<uint32> "
			      NETVSC_ARG_TXBREAK "=<uint32> "
			      NETVSC_ARG_RX_EXTMBUF_ENABLE "=<0|1>");