/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_rxtx.h"
#include "hn_nvs.h"
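/*
 * Overview: on Hyper-V, the host may hot-plug an SR-IOV VF that
 * carries the same MAC address as the synthetic vmbus NIC.  The
 * routines below locate, attach and shadow that VF so configuration
 * and data-path operations on the synthetic device can be cascaded
 * down to it.  Access to the VF port is serialized by hv->vf_lock.
 */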
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}
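/*
 * Note: the host gives the VF the same MAC address as the synthetic
 * NIC, so comparing MAC addresses across all probed ports is enough
 * to find the companion device.
 */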
/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}
	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}
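/*
 * Note: claiming ownership through the rte_eth_dev_owner API keeps
 * other DPDK entities (e.g. the failsafe or bonding drivers, or the
 * application itself) from independently controlling the VF port.
 */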
/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	err = hn_vf_attach(hv, port);
	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}

	return err;
}
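/*
 * Note: hn_nvs_set_datapath(hv, NVS_DATAPATH_VF) asks the host to
 * steer incoming traffic to the VF, so the synthetic channel becomes
 * the standby path from this point on.
 */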
/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
		return;
	}

	/* Stop incoming packets from arriving on VF */
	hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

	/* Give back ownership */
	rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

	/* Stop transmission over VF */
	hv->vf_port = HN_INVALID_PORT;
}
/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	rte_rwlock_write_lock(&hv->vf_lock);
	hv->vf_present = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}
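/*
 * Locking convention: hn_vf_add() and hn_vf_remove() do not take
 * hv->vf_lock themselves; the write lock is held by this handler
 * across the whole add/remove sequence.
 */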
static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}
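/*
 * Example: if the synthetic path reports nb_max = 4096 descriptors
 * and the VF reports nb_max = 2048, the merged limit is 2048.  Upper
 * bounds take the minimum of the two paths while lower bounds and
 * alignment take the maximum, so the result is valid for both.
 */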
/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}
/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed, pass it on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}
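/*
 * Note: _rte_eth_dev_callback_process() re-raises the LSC event on
 * the synthetic port, so application callbacks registered there see
 * link changes that actually originated on the VF.
 */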
static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}
/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
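/*
 * Note: matching queue counts matter because the per-queue setup and
 * release helpers below map queue index N of the synthetic device
 * directly onto queue index N of the VF.
 */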
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}
/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}
/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}
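/*
 * For example, hn_vf_reset() below expands (roughly) to:
 *
 *	if (vf_dev)
 *		rte_eth_dev_reset(vf_dev->data->port_id);
 *
 * wrapped in the vf_lock read lock, i.e. the call is simply skipped
 * when no VF is attached.
 */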
void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}
void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint16_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset ids of VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}
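/*
 * Note: the synthetic stats occupy xstat ids [0, offset) in the
 * combined table, so shifting every VF stat id by the same offset
 * keeps the ids unique and aligned with the "vf_"-prefixed names
 * produced above.
 */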
int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}
int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}