/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/types.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

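/*
 * Support for the "accelerated networking" model on Hyper-V: the host
 * exposes a PCI virtual function (VF) NIC that is paired with this
 * synthetic (vmbus) device.  The VF shares the synthetic device's MAC
 * address; this file attaches/detaches the VF port and cascades
 * configuration, queue setup, and control operations down to it.
 */
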
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		/* Skip the synthetic device itself */
		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

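/*
 * Note: the host does not pass an explicit handle linking the VF to
 * the synthetic device; the pairing convention is that both report
 * the same MAC address, which is why hn_vf_match() keys on it.
 */
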
/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct rte_eth_dev *dev, struct hn_data *hv)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int port, ret;

	if (hv->vf_ctx.vf_attached) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return 0;
	}

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "Couldn't find port for VF");
		return port;
	}

	PMD_DRV_LOG(NOTICE, "found matching VF port %d", port);
	ret = rte_eth_dev_owner_get(port, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port);
	hv->vf_ctx.vf_attached = true;
	hv->vf_ctx.vf_port = port;
	return 0;
}

static void hn_vf_remove(struct hn_data *hv);

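/*
 * VF removal is deferred to an EAL alarm callback: the RMV interrupt
 * callback below cannot synchronously stop, close, and unregister the
 * very port that is invoking it, so hn_eth_rmv_event_callback()
 * schedules hn_remove_delayed() to run from the alarm thread instead.
 */
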
static void hn_remove_delayed(void *args)
{
	struct hn_data *hv = args;
	uint16_t port_id = hv->vf_ctx.vf_port;
	struct rte_device *dev = rte_eth_devices[port_id].device;
	int ret;

	/* Tell VSP to switch data path to synthetic */
	hn_vf_remove(hv);

	PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
	rte_rwlock_write_lock(&hv->vf_lock);

	/* Give back ownership */
	ret = rte_eth_dev_owner_unset(port_id, hv->owner.id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_owner_unset failed ret=%d",
			    ret);
	hv->vf_ctx.vf_attached = false;

	ret = rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
					      hn_eth_rmv_event_callback, hv);
	if (ret)
		PMD_DRV_LOG(ERR,
			    "rte_eth_dev_callback_unregister failed ret=%d",
			    ret);

	/* Detach and release port_id from system */
	ret = rte_eth_dev_stop(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d",
			    port_id, ret);

	ret = rte_eth_dev_close(port_id);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d",
			    port_id, ret);

	ret = rte_dev_remove(dev);
	if (ret)
		PMD_DRV_LOG(ERR, "rte_dev_remove failed port_id=%u ret=%d",
			    port_id, ret);
	hv->vf_ctx.vf_state = vf_removed;

	rte_rwlock_write_unlock(&hv->vf_lock);
}

int hn_eth_rmv_event_callback(uint16_t port_id,
			      enum rte_eth_event_type event __rte_unused,
			      void *cb_arg, void *out __rte_unused)
{
	struct hn_data *hv = cb_arg;

	PMD_DRV_LOG(NOTICE, "Removing VF portid %d", port_id);
	rte_eal_alarm_set(1, hn_remove_delayed, hv);

	return 0;
}

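/*
 * Replicate the synthetic device's queue configuration onto the VF:
 * each already-configured Tx/Rx queue on the synthetic port is queried
 * and an equivalent queue (same descriptor count and conf, same mbuf
 * pool on the Rx side) is set up on the VF port.
 */
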
static int hn_setup_vf_queues(int port, struct rte_eth_dev *dev)
{
	struct hn_rx_queue *rx_queue;
	struct rte_eth_txq_info txinfo;
	struct rte_eth_rxq_info rxinfo;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		ret = rte_eth_tx_queue_info_get(dev->data->port_id, i, &txinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		ret = rte_eth_tx_queue_setup(port, i, txinfo.nb_desc, 0,
					     &txinfo.conf);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_tx_queue_setup failed ret=%d",
				    ret);
			return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		ret = rte_eth_rx_queue_info_get(dev->data->port_id, i, &rxinfo);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_info_get failed ret=%d",
				    ret);
			return ret;
		}

		rx_queue = dev->data->rx_queues[i];

		ret = rte_eth_rx_queue_setup(port, i, rxinfo.nb_desc, 0,
					     &rxinfo.conf, rx_queue->mb_pool);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "rte_eth_rx_queue_setup failed ret=%d",
				    ret);
			break;
		}
	}

	return ret;
}

int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);

static void hn_vf_add_retry(void *args)
{
	struct rte_eth_dev *dev = args;
	struct hn_data *hv = dev->data->dev_private;

	hn_vf_add(dev, hv);
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int ret, port;

	if (!hv->vf_ctx.vf_vsp_reported || hv->vf_ctx.vf_vsc_switched)
		return 0;

	rte_rwlock_write_lock(&hv->vf_lock);

	ret = hn_vf_attach(dev, hv);
	if (ret) {
		PMD_DRV_LOG(NOTICE,
			    "RNDIS reports VF but device not found, retrying");
		rte_eal_alarm_set(1000000, hn_vf_add_retry, dev);
		goto exit;
	}

	port = hv->vf_ctx.vf_port;

	/* If the primary device has started, this is a VF hot add.
	 * Configure and start VF device.
	 */
	if (dev->data->dev_started) {
		if (rte_eth_devices[port].data->dev_started) {
			PMD_DRV_LOG(ERR, "VF already started on hot add");
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "configuring VF port %d", port);
		ret = hn_vf_configure(dev, &dev->data->dev_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to configure VF port %d",
				    port);
			goto exit;
		}

		ret = hn_setup_vf_queues(port, dev);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Failed to configure VF queues port %d",
				    port);
			goto exit;
		}

		PMD_DRV_LOG(NOTICE, "Starting VF port %d", port);
		ret = rte_eth_dev_start(port);
		if (ret) {
			PMD_DRV_LOG(ERR, "rte_eth_dev_start failed ret=%d",
				    ret);
			goto exit;
		}
		hv->vf_ctx.vf_state = vf_started;
	}

	/* Tell the host to use the VF data path from now on */
	ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	if (ret == 0)
		hv->vf_ctx.vf_vsc_switched = true;

exit:
	rte_rwlock_write_unlock(&hv->vf_lock);
	return ret;
}

/* Switch data path back to synthetic and deactivate the VF */
static void hn_vf_remove(struct hn_data *hv)
{
	int ret;

	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
		return;
	}

	rte_rwlock_write_lock(&hv->vf_lock);
	if (!hv->vf_ctx.vf_vsc_switched) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
		if (ret == 0)
			hv->vf_ctx.vf_vsc_switched = false;
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_ctx.vf_vsp_reported = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}

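/*
 * Merge rule: the combined limits must be satisfiable by both paths,
 * so upper bounds (nb_max, nb_seg_max) take the smaller value and
 * lower bounds (nb_min, nb_align) take the larger.  For example, if
 * the synthetic path allows up to 4096 descriptors but the VF only
 * allows 1024, the merged nb_max is 1024.
 */
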
/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	/* need to monitor removal event */
	vf_conf.intr_conf.rmv = 1;

	if (hv->vf_ctx.vf_attached) {
		ret = rte_eth_dev_callback_register(hv->vf_ctx.vf_port,
						    RTE_ETH_EVENT_INTR_RMV,
						    hn_eth_rmv_event_callback,
						    hv);
		if (ret) {
			PMD_DRV_LOG(ERR,
				    "Registering callback failed for vf port %d ret %d",
				    hv->vf_ctx.vf_port, ret);
			return ret;
		}

		ret = rte_eth_dev_configure(hv->vf_ctx.vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret) {
			PMD_DRV_LOG(ERR, "VF configuration failed: %d", ret);

			rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
							RTE_ETH_EVENT_INTR_RMV,
							hn_eth_rmv_event_callback,
							hv);
			return ret;
		}

		hv->vf_ctx.vf_state = vf_configured;
	}

	return ret;
}

443 /* Configure VF if present.
444 * VF device will have the same number of queues as the synthetic device
446 int hn_vf_configure_locked(struct rte_eth_dev *dev,
447 const struct rte_eth_conf *dev_conf)
449 struct hn_data *hv = dev->data->dev_private;
452 rte_rwlock_write_lock(&hv->vf_lock);
453 ret = hn_vf_configure(dev, dev_conf);
454 rte_rwlock_write_unlock(&hv->vf_lock);
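/*
 * Locking convention for vf_lock: hot-plug paths that change which VF
 * is attached (add/remove/configure) take the write lock; the cascaded
 * ethdev operations below only dereference the current VF and take the
 * read lock, so concurrent cascades do not serialize against each other.
 */
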
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		ret = rte_eth_dev_stop(vf_dev->data->port_id);
		if (ret != 0)
			PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
				    vf_dev->data->port_id);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down and return status */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}

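/*
 * Example expansion: hn_vf_stats_reset() below becomes a body that
 * looks up the VF under the read lock and, if one is attached, calls
 * rte_eth_stats_reset() on the VF port and returns its status; with
 * no VF attached it is a no-op returning 0.
 */
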
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

int hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_eal_alarm_cancel(hn_vf_add_retry, dev);

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_ctx.vf_attached) {
		rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
						RTE_ETH_EVENT_INTR_RMV,
						hn_eth_rmv_event_callback,
						hv);
		rte_eal_alarm_cancel(hn_remove_delayed, hv);
		ret = rte_eth_dev_close(hv->vf_ctx.vf_port);
		hv->vf_ctx.vf_attached = false;
	}
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint16_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release)
		(*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release)
		(*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

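/*
 * The synthetic device reports its own xstats first; VF xstats are
 * appended starting at 'offset', so each VF entry's id is shifted by
 * the same offset to stay consistent with the combined name list above.
 */
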
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset id's for VF stats */
	for (i = 0; i < count; i++)
		xstats[i + offset].id += offset;

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}