/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <sys/types.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"
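
/*
 * Transparent VF (Hyper-V/Azure accelerated networking) support.
 *
 * When the host exposes a PCI VF carrying the same MAC address as the
 * synthetic vmbus NIC, this PMD claims that port via rte_eth_dev_owner_set()
 * and mirrors configuration, queue setup, stats and RSS requests onto it.
 * The NVS datapath is switched between NVS_DATAPATH_SYNTHETIC and
 * NVS_DATAPATH_VF as the VF comes and goes.  All accesses to the VF port
 * are serialized by hv->vf_lock.
 */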

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	err = hn_vf_attach(hv, port);

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device and switch traffic back to synthetic path */
static void hn_vf_remove(struct hn_data *hv)
{
	rte_spinlock_lock(&hv->vf_lock);

	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Give back ownership while the port id is still valid */
		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

/* Take the strictest of the VF and synthetic descriptor limits */
static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}
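
/* Merge the VF capabilities into the synthetic device info, if a VF is attached */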
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
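
/* Let the VF report link state when one is attached and supports it */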
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force the VF to have the same number of queues as the synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
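
/* Report packet types supported by the VF datapath, if one is attached */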
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}
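
/* Start the VF port, if one is attached */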
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade the request down and return its status */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_spinlock_unlock(&hv->vf_lock);		\
		return ret;					\
	}

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

/* Close the VF port (if any) and forget about it */
void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_spinlock_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}
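
/* Pass the multicast filter list through to the VF, if attached */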
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint16_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
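
/* Mirror transmit queue setup onto the VF, if attached */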
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
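
/* Release the matching VF transmit queue, if the VF driver provides a release op */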
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}
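
/* Mirror receive queue setup onto the VF, if attached */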
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
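
/* Release the matching VF receive queue, if the VF driver provides a release op */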
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
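
/* Fetch VF xstat names and prefix them with "vf_" to distinguish them */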
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}
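
/* Fetch VF xstats into the caller's array at the given offset and rebase their ids */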
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_spinlock_unlock(&hv->vf_lock);

	/* Offset id's for VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
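
/* Apply RSS hash configuration to the VF through its rss_hash_update op, if present */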
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}
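
/* Apply the RSS redirection table to the VF through its reta_update op, if present */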
int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}