/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Microsoft Corporation
 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
 * All rights reserved.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus_vmbus.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rndis.h"
#include "hn_nvs.h"
#include "ndis.h"
#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
			    DEV_TX_OFFLOAD_TCP_CKSUM  | \
			    DEV_TX_OFFLOAD_UDP_CKSUM  | \
			    DEV_TX_OFFLOAD_TCP_TSO    | \
			    DEV_TX_OFFLOAD_MULTI_SEGS | \
			    DEV_TX_OFFLOAD_VLAN_INSERT)

#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
			    DEV_RX_OFFLOAD_VLAN_STRIP)
int hn_logtype_init;
int hn_logtype_driver;
struct hn_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct hn_xstats_name_off hn_stat_strings[] = {
	{ "good_packets",           offsetof(struct hn_stats, packets) },
	{ "good_bytes",             offsetof(struct hn_stats, bytes) },
	{ "errors",                 offsetof(struct hn_stats, errors) },
	{ "ring_full",              offsetof(struct hn_stats, ring_full) },
	{ "multicast_packets",      offsetof(struct hn_stats, multicast) },
	{ "broadcast_packets",      offsetof(struct hn_stats, broadcast) },
	{ "undersize_packets",      offsetof(struct hn_stats, size_bins[0]) },
	{ "size_64_packets",        offsetof(struct hn_stats, size_bins[1]) },
	{ "size_65_127_packets",    offsetof(struct hn_stats, size_bins[2]) },
	{ "size_128_255_packets",   offsetof(struct hn_stats, size_bins[3]) },
	{ "size_256_511_packets",   offsetof(struct hn_stats, size_bins[4]) },
	{ "size_512_1023_packets",  offsetof(struct hn_stats, size_bins[5]) },
	{ "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
	{ "size_1519_max_packets",  offsetof(struct hn_stats, size_bins[7]) },
};
/* The default RSS key.
 * This value is the same as MLX5 so that flows will be
 * received on the same path for both VF and synthetic NIC.
 */
static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
	0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
};
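
/* Allocate an ethdev for this VMBus device; in a secondary process,
 * attach to the port that the primary process already created.
 */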
static struct rte_eth_dev *
eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
{
	struct rte_eth_dev *eth_dev;
	const char *name;

	if (!dev)
		return NULL;
	name = dev->device.name;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
			return NULL;
		}

		if (private_data_size) {
			eth_dev->data->dev_private =
				rte_zmalloc_socket(name, private_data_size,
						   RTE_CACHE_LINE_SIZE,
						   dev->device.numa_node);
			if (!eth_dev->data->dev_private) {
				PMD_DRV_LOG(NOTICE,
					    "can not allocate driver data");
				rte_eth_dev_release_port(eth_dev);
				return NULL;
			}
		}
	} else {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_DRV_LOG(NOTICE, "can not attach secondary");
			return NULL;
		}
	}

	eth_dev->device = &dev->device;

	/* interrupt is simulated */
	dev->intr_handle.type = RTE_INTR_HANDLE_EXT;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	eth_dev->intr_handle = &dev->intr_handle;

	/* allow ethdev to remove on close */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	return eth_dev;
}
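
/* Release an ethdev created by eth_dev_vmbus_allocate() */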
static void
eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
{
	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
}
/* handle "latency=X" from devargs */
static int hn_set_latency(const char *key, const char *value, void *opaque)
{
	struct hn_data *hv = opaque;
	char *endp = NULL;
	unsigned long lat;

	lat = strtoul(value, &endp, 0);
	if (*value == '\0' || *endp != '\0') {
		PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);

	hv->latency = lat * 1000;	/* usec to nsec */
	return 0;
}
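
/* For example, a devargs string of "latency=20" requests a 20 usec
 * channel interrupt latency (stored internally in nanoseconds).
 */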
/* Parse device arguments */
static int hn_parse_args(const struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_devargs *devargs = dev->device->devargs;
	static const char * const valid_keys[] = {
		"latency",
		NULL
	};
	struct rte_kvargs *kvlist;
	int ret;

	if (!devargs)
		return 0;

	PMD_INIT_LOG(DEBUG, "device args %s %s",
		     devargs->name, devargs->args);

	kvlist = rte_kvargs_parse(devargs->args, valid_keys);
	if (!kvlist) {
		PMD_DRV_LOG(NOTICE, "invalid parameters");
		return -EINVAL;
	}

	ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to process latency arg");

	rte_kvargs_free(kvlist);
	return ret;
}
/* Update link status.
 * Note: the DPDK definition of "wait_to_complete"
 * means block this call until link is up,
 * which is not worth supporting here.
 */
int
hn_dev_link_update(struct rte_eth_dev *dev,
		   int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_link link, old;
	int error;

	old = dev->data->dev_link;

	error = hn_rndis_get_linkstatus(hv);
	if (error)
		return error;

	hn_rndis_get_linkspeed(hv);

	hn_vf_link_update(dev, wait_to_complete);

	link = (struct rte_eth_link) {
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_autoneg = ETH_LINK_SPEED_FIXED,
		.link_speed = hv->link_speed / 10000, /* 100bps units to Mbps */
	};

	if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	if (old.link_status == link.link_status)
		return 0;

	PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
		     (link.link_status == ETH_LINK_UP) ? "up" : "down");

	return rte_eth_linkstatus_set(dev, &link);
}
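
/* Report device capabilities; limits come from the NDIS/RNDIS
 * negotiation and from the associated VF device, if any.
 */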
static void hn_dev_info_get(struct rte_eth_dev *dev,
			    struct rte_eth_dev_info *dev_info)
{
	struct hn_data *hv = dev->data->dev_private;

	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
	dev_info->max_mac_addrs = 1;

	dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
	dev_info->flow_type_rss_offloads = hv->rss_offloads;
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;

	dev_info->max_rx_queues = hv->max_queues;
	dev_info->max_tx_queues = hv->max_queues;

	hn_rndis_get_offload(hv, dev_info);
	hn_vf_info_get(hv, dev_info);
}
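
/* Update the RSS redirection table on the host and mirror the
 * change to the VF device if one is present.
 */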
static int hn_rss_reta_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	unsigned int i;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != NDIS_HASH_INDCNT) {
		PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
		return -EINVAL;
	}

	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
		uint64_t mask = (uint64_t)1 << shift;

		if (reta_conf[idx].mask & mask)
			hv->rss_ind[i] = reta_conf[idx].reta[shift];
	}

	err = hn_rndis_conf_rss(hv, 0);
	if (err) {
		PMD_DRV_LOG(NOTICE, "reta reconfig failed");
		return err;
	}

	return hn_vf_reta_hash_update(dev, reta_conf, reta_size);
}
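
/* Read back the RSS redirection table */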
static int hn_rss_reta_query(struct rte_eth_dev *dev,
			     struct rte_eth_rss_reta_entry64 *reta_conf,
			     uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	if (reta_size != NDIS_HASH_INDCNT) {
		PMD_DRV_LOG(ERR, "Hash lookup table size does not match NDIS");
		return -EINVAL;
	}

	for (i = 0; i < NDIS_HASH_INDCNT; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
		uint64_t mask = (uint64_t)1 << shift;

		if (reta_conf[idx].mask & mask)
			reta_conf[idx].reta[shift] = hv->rss_ind[i];
	}

	return 0;
}
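
/* Record RSS settings in NDIS form; they take effect on the host
 * with the next hn_rndis_conf_rss() call.
 */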
static void hn_rss_hash_init(struct hn_data *hv,
			     const struct rte_eth_rss_conf *rss_conf)
{
	/* Convert from DPDK RSS hash flags to NDIS hash flags */
	hv->rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;

	if (rss_conf->rss_hf & ETH_RSS_IPV4)
		hv->rss_hash |= NDIS_HASH_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		hv->rss_hash |= NDIS_HASH_TCP_IPV4;
	if (rss_conf->rss_hf & ETH_RSS_IPV6)
		hv->rss_hash |= NDIS_HASH_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_IPV6_EX)
		hv->rss_hash |= NDIS_HASH_IPV6_EX;
	if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		hv->rss_hash |= NDIS_HASH_TCP_IPV6;
	if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX)
		hv->rss_hash |= NDIS_HASH_TCP_IPV6_EX;

	memcpy(hv->rss_key, rss_conf->rss_key ? : rss_default_key,
	       NDIS_HASH_KEYSIZE_TOEPLITZ);
}
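
/* Change the RSS configuration: RSS is disabled on the host first,
 * then re-enabled with the new key and hash fields.
 */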
static int hn_rss_hash_update(struct rte_eth_dev *dev,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int err;

	PMD_INIT_FUNC_TRACE();

	err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
	if (err) {
		PMD_DRV_LOG(NOTICE, "rss disable failed");
		return err;
	}

	hn_rss_hash_init(hv, rss_conf);

	err = hn_rndis_conf_rss(hv, 0);
	if (err) {
		PMD_DRV_LOG(NOTICE, "rss reconfig failed (RSS disabled)");
		return err;
	}

	return hn_vf_rss_hash_update(dev, rss_conf);
}
static int hn_rss_hash_conf_get(struct rte_eth_dev *dev,
				struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hv->ndis_ver < NDIS_VERSION_6_20) {
		PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
		return -EOPNOTSUPP;
	}

	rss_conf->rss_key_len = NDIS_HASH_KEYSIZE_TOEPLITZ;
	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, hv->rss_key,
		       NDIS_HASH_KEYSIZE_TOEPLITZ);

	rss_conf->rss_hf = 0;
	if (hv->rss_hash & NDIS_HASH_IPV4)
		rss_conf->rss_hf |= ETH_RSS_IPV4;
	if (hv->rss_hash & NDIS_HASH_TCP_IPV4)
		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (hv->rss_hash & NDIS_HASH_IPV6)
		rss_conf->rss_hf |= ETH_RSS_IPV6;
	if (hv->rss_hash & NDIS_HASH_IPV6_EX)
		rss_conf->rss_hf |= ETH_RSS_IPV6_EX;
	if (hv->rss_hash & NDIS_HASH_TCP_IPV6)
		rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (hv->rss_hash & NDIS_HASH_TCP_IPV6_EX)
		rss_conf->rss_hf |= ETH_RSS_IPV6_TCP_EX;

	return 0;
}
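
/* Rx filter changes apply to the synthetic path and are mirrored
 * to the VF device when one is present.
 */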
static void
hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
	hn_vf_promiscuous_enable(dev);
}

static void
hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint32_t filter;

	filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
	if (dev->data->all_multicast)
		filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
	hn_rndis_set_rxfilter(hv, filter);
	hn_vf_promiscuous_disable(dev);
}

static void
hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_ALL_MULTICAST |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_enable(dev);
}

static void
hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
			      NDIS_PACKET_TYPE_BROADCAST);
	hn_vf_allmulticast_disable(dev);
}

static int
hn_dev_mc_addr_list(struct rte_eth_dev *dev,
		    struct rte_ether_addr *mc_addr_set,
		    uint32_t nb_mc_addr)
{
	/* No filtering on the synthetic path, but can do it on VF */
	return hn_vf_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
/* Setup shared rx/tx queue data */
static int hn_subchan_configure(struct hn_data *hv,
				uint32_t subchan)
{
	struct vmbus_channel *primary = hn_primary_chan(hv);
	int err;
	unsigned int retry = 0;

	PMD_DRV_LOG(DEBUG, "open %u subchannels", subchan);

	/* Send create sub channels command */
	err = hn_nvs_alloc_subchans(hv, &subchan);
	if (err)
		return err;

	while (subchan > 0) {
		struct vmbus_channel *new_sc;
		uint16_t chn_index;

		err = rte_vmbus_subchan_open(primary, &new_sc);
		if (err == -ENOENT && ++retry < 1000) {
			/* This can happen if not ready yet */
			rte_delay_ms(10);
			continue;
		}

		if (err) {
			PMD_DRV_LOG(ERR, "open subchannel failed: %d", err);
			return err;
		}

		rte_vmbus_set_latency(hv->vmbus, new_sc, hv->latency);

		retry = 0;
		chn_index = rte_vmbus_sub_channel_index(new_sc);
		if (chn_index == 0 || chn_index > hv->max_queues) {
			PMD_DRV_LOG(ERR,
				    "Invalid subchannel offermsg channel %u",
				    chn_index);
			return -EIO;
		}

		PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
		hv->channels[chn_index] = new_sc;
		--subchan;
	}

	return err;
}
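
/* Validate requested offloads, then set up RSS state and the
 * subchannels needed for multi-queue operation.
 */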
static int hn_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct rte_eth_rss_conf *rss_conf = &dev_conf->rx_adv_conf.rss_conf;
	const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
	const struct rte_eth_txmode *txmode = &dev_conf->txmode;
	struct hn_data *hv = dev->data->dev_private;
	uint64_t unsupported;
	unsigned int i;
	int err, subchan;

	PMD_INIT_FUNC_TRACE();

	unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE, "unsupported TX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
	if (unsupported) {
		PMD_DRV_LOG(NOTICE, "unsupported RX offload: %#" PRIx64,
			    unsupported);
		return -EINVAL;
	}

	hv->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);

	err = hn_rndis_conf_offload(hv, txmode->offloads,
				    rxmode->offloads);
	if (err) {
		PMD_DRV_LOG(NOTICE, "offload configure failed");
		return err;
	}

	hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
				 dev->data->nb_tx_queues);

	/* Default: spread flows evenly across the configured queues */
	for (i = 0; i < NDIS_HASH_INDCNT; i++)
		hv->rss_ind[i] = i % hv->num_queues;

	hn_rss_hash_init(hv, rss_conf);

	subchan = hv->num_queues - 1;
	if (subchan > 0) {
		err = hn_subchan_configure(hv, subchan);
		if (err) {
			PMD_DRV_LOG(NOTICE, "subchannel configuration failed");
			return err;
		}

		err = hn_rndis_conf_rss(hv, 0);
		if (err) {
			PMD_DRV_LOG(NOTICE, "initial RSS config failed");
			return err;
		}
	}

	return hn_vf_configure(dev, dev_conf);
}
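
/* Basic stats: VF counters are fetched first, then the per-queue
 * synthetic-path counters are accumulated on top.
 */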
static int hn_dev_stats_get(struct rte_eth_dev *dev,
			    struct rte_eth_stats *stats)
{
	unsigned int i;

	hn_vf_stats_get(dev, stats);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats->opackets += txq->stats.packets;
		stats->obytes += txq->stats.bytes;
		stats->oerrors += txq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txq->stats.packets;
			stats->q_obytes[i] = txq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats->ipackets += rxq->stats.packets;
		stats->ibytes += rxq->stats.bytes;
		stats->ierrors += rxq->stats.errors;
		stats->imissed += rxq->stats.ring_full;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxq->stats.packets;
			stats->q_ibytes[i] = rxq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	return 0;
}
static void
hn_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;
		memset(&txq->stats, 0, sizeof(struct hn_stats));
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;
		memset(&rxq->stats, 0, sizeof(struct hn_stats));
	}
}

static void
hn_dev_xstats_reset(struct rte_eth_dev *dev)
{
	hn_dev_stats_reset(dev);
	hn_vf_xstats_reset(dev);
}
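
/* Number of xstats entries: per-queue counters for the synthetic
 * path plus whatever the VF driver reports.
 */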
static int
hn_dev_xstats_count(struct rte_eth_dev *dev)
{
	int ret, count;

	count = dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings);
	count += dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);

	ret = hn_vf_xstats_get_names(dev, NULL, 0);
	if (ret < 0)
		return ret;

	return count + ret;
}
static int
hn_dev_xstats_get_names(struct rte_eth_dev *dev,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int limit)
{
	unsigned int i, t, count = 0;
	int ret;

	if (!xstats_names)
		return hn_dev_xstats_count(dev);

	/* Note: limit checked in rte_eth_xstats_names() */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "tx_q%u_%s", i, hn_stat_strings[t].name);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		if (count >= limit)
			break;

		for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
			snprintf(xstats_names[count++].name,
				 RTE_ETH_XSTATS_NAME_SIZE,
				 "rx_q%u_%s", i,
				 hn_stat_strings[t].name);
	}

	ret = hn_vf_xstats_get_names(dev, xstats_names + count,
				     limit - count);
	if (ret < 0)
		return ret;

	return count + ret;
}
static int
hn_dev_xstats_get(struct rte_eth_dev *dev,
		  struct rte_eth_xstat *xstats,
		  unsigned int n)
{
	unsigned int i, t, count = 0;
	const unsigned int nstats = hn_dev_xstats_count(dev);
	const char *stats;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct hn_tx_queue *txq = dev->data->tx_queues[i];

		if (!txq)
			continue;

		stats = (const char *)&txq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct hn_rx_queue *rxq = dev->data->rx_queues[i];

		if (!rxq)
			continue;

		stats = (const char *)&rxq->stats;
		for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) {
			xstats[count].id = count;
			xstats[count].value = *(const uint64_t *)
				(stats + hn_stat_strings[t].offset);
		}
	}

	ret = hn_vf_xstats_get(dev, xstats + count, n - count);
	if (ret < 0)
		return ret;

	return count + ret;
}
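
/* Start the device: enable receive on the synthetic path, then
 * start the VF; the filter is rolled back if the VF fails to start.
 */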
static int
hn_dev_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	int error;

	PMD_INIT_FUNC_TRACE();

	error = hn_rndis_set_rxfilter(hv,
				      NDIS_PACKET_TYPE_BROADCAST |
				      NDIS_PACKET_TYPE_ALL_MULTICAST |
				      NDIS_PACKET_TYPE_DIRECTED);
	if (error)
		return error;

	error = hn_vf_start(dev);
	if (error)
		hn_rndis_set_rxfilter(hv, 0);

	return error;
}
static void
hn_dev_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	hn_rndis_set_rxfilter(hv, 0);
	hn_vf_stop(dev);
}

static void
hn_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	hn_vf_close(dev);
	hn_dev_free_queues(dev);
}
static const struct eth_dev_ops hn_eth_dev_ops = {
	.dev_configure = hn_dev_configure,
	.dev_start = hn_dev_start,
	.dev_stop = hn_dev_stop,
	.dev_close = hn_dev_close,
	.dev_infos_get = hn_dev_info_get,
	.dev_supported_ptypes_get = hn_vf_supported_ptypes,
	.promiscuous_enable = hn_dev_promiscuous_enable,
	.promiscuous_disable = hn_dev_promiscuous_disable,
	.allmulticast_enable = hn_dev_allmulticast_enable,
	.allmulticast_disable = hn_dev_allmulticast_disable,
	.set_mc_addr_list = hn_dev_mc_addr_list,
	.reta_update = hn_rss_reta_update,
	.reta_query = hn_rss_reta_query,
	.rss_hash_update = hn_rss_hash_update,
	.rss_hash_conf_get = hn_rss_hash_conf_get,
	.tx_queue_setup = hn_dev_tx_queue_setup,
	.tx_queue_release = hn_dev_tx_queue_release,
	.tx_done_cleanup = hn_dev_tx_done_cleanup,
	.rx_queue_setup = hn_dev_rx_queue_setup,
	.rx_queue_release = hn_dev_rx_queue_release,
	.link_update = hn_dev_link_update,
	.stats_get = hn_dev_stats_get,
	.stats_reset = hn_dev_stats_reset,
	.xstats_get = hn_dev_xstats_get,
	.xstats_get_names = hn_dev_xstats_get_names,
	.xstats_reset = hn_dev_xstats_reset,
};
/*
 * Set up connection between PMD and kernel.
 */
static int
hn_attach(struct hn_data *hv, unsigned int mtu)
{
	int error;

	error = hn_nvs_attach(hv, mtu);
	if (error)
		goto failed_nvs;

	error = hn_rndis_attach(hv);
	if (error)
		goto failed_rndis;

	/*
	 * NOTE:
	 * Under certain conditions on certain versions of Hyper-V,
	 * the RNDIS rxfilter is _not_ zero on the hypervisor side
	 * after the successful RNDIS initialization.
	 */
	hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
	return 0;

failed_rndis:
	hn_nvs_detach(hv);
failed_nvs:
	return error;
}

static void
hn_detach(struct hn_data *hv)
{
	hn_rndis_detach(hv);
	hn_nvs_detach(hv);
}
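
/* Per-port initialization: set burst functions, negotiate with the
 * host, open the primary channel, and attach a VF if one was
 * already reported by the host.
 */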
static int
eth_hn_dev_init(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;
	struct rte_device *device = eth_dev->device;
	struct rte_vmbus_device *vmbus;
	unsigned int rxr_cnt;
	int err, max_chan;

	PMD_INIT_FUNC_TRACE();

	vmbus = container_of(device, struct rte_vmbus_device, device);
	eth_dev->dev_ops = &hn_eth_dev_ops;
	eth_dev->tx_pkt_burst = &hn_xmit_pkts;
	eth_dev->rx_pkt_burst = &hn_recv_pkts;

	/*
	 * For secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Since Hyper-V only supports one MAC address, just use local data */
	eth_dev->data->mac_addrs = &hv->mac_addr;

	hv->vmbus = vmbus;
	hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
	hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
	hv->port_id = eth_dev->data->port_id;
	hv->latency = HN_CHAN_LATENCY_NS;

	rte_spinlock_init(&hv->vf_lock);
	hv->vf_port = HN_INVALID_PORT;

	err = hn_parse_args(eth_dev);
	if (err)
		return err;

	strlcpy(hv->owner.name, eth_dev->device->name,
		RTE_ETH_MAX_OWNER_NAME_LEN);
	err = rte_eth_dev_owner_new(&hv->owner.id);
	if (err) {
		PMD_INIT_LOG(ERR, "Can not get owner id");
		return err;
	}

	/* Initialize primary channel input for control operations */
	err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
	if (err)
		return err;

	rte_vmbus_set_latency(hv->vmbus, hv->channels[0], hv->latency);

	hv->primary = hn_rx_queue_alloc(hv, 0,
					eth_dev->device->numa_node);
	if (!hv->primary)
		return -ENOMEM;

	err = hn_attach(hv, RTE_ETHER_MTU);
	if (err)
		goto failed;

	err = hn_tx_pool_init(eth_dev);
	if (err)
		goto failed;

	err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
	if (err)
		goto failed;

	/* Multi queue requires later versions of Windows Server */
	if (hv->nvs_ver < NVS_VERSION_5)
		return 0;

	max_chan = rte_vmbus_max_channels(vmbus);
	PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
	if (max_chan <= 0)
		goto failed;

	if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
		rxr_cnt = 1;

	hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);

	/* If VF was reported but not added, do it now */
	if (hv->vf_present && !hn_vf_attached(hv)) {
		PMD_INIT_LOG(DEBUG, "Adding VF device");

		err = hn_vf_add(eth_dev, hv);
		if (err)
			goto failed;
	}

	return 0;

failed:
	PMD_INIT_LOG(NOTICE, "device init failed");

	hn_tx_pool_uninit(eth_dev);
	hn_detach(hv);
	return err;
}
static int
eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct hn_data *hv = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hn_dev_stop(eth_dev);
	hn_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	hn_detach(hv);
	hn_tx_pool_uninit(eth_dev);
	rte_vmbus_chan_close(hv->primary->chan);
	rte_free(hv->primary);
	rte_eth_dev_owner_delete(hv->owner.id);

	return 0;
}
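
/* VMBus bus driver probe and remove entry points */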
static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
			struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
	if (!eth_dev)
		return -ENOMEM;

	ret = eth_hn_dev_init(eth_dev);
	if (ret)
		eth_dev_vmbus_release(eth_dev);
	else
		rte_eth_dev_probing_finish(eth_dev);

	return ret;
}
static int eth_hn_remove(struct rte_vmbus_device *dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	PMD_INIT_FUNC_TRACE();

	eth_dev = rte_eth_dev_allocated(dev->device.name);
	if (!eth_dev)
		return -ENODEV;

	ret = eth_hn_dev_uninit(eth_dev);
	if (ret)
		return ret;

	eth_dev_vmbus_release(eth_dev);
	return 0;
}
/* Network device GUID */
static const rte_uuid_t hn_net_ids[] = {
	/* f8615163-df3e-46c5-913f-f2d2f965ed0e */
	RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
	{ 0 }
};

static struct rte_vmbus_driver rte_netvsc_pmd = {
	.id_table = hn_net_ids,
	.probe = eth_hn_probe,
	.remove = eth_hn_remove,
};
RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");

RTE_INIT(hn_init_log)
{
	hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
	if (hn_logtype_init >= 0)
		rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
	hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
	if (hn_logtype_driver >= 0)
		rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
}