/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/fcntl.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct ether_addr *mac = dev->data->mac_addrs;
	char buf[32];
	int i;

	ether_format_addr(buf, sizeof(buf), mac);
	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

		/* Skip the synthetic device itself */
		if (vf_dev == dev)
			continue;

		ether_format_addr(buf, sizeof(buf), vf_mac);
		if (is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id,
			struct rte_eth_dev **vf_dev)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	*vf_dev = &rte_eth_devices[port_id];
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_dev) {
		PMD_DRV_LOG(ERR, "VF already attached");
		err = -EBUSY;
		goto out;
	}

	err = hn_vf_attach(hv, port, &hv->vf_dev);
	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
out:
	rte_spinlock_unlock(&hv->vf_lock);
	return err;
}

/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (!vf_dev) {
		PMD_DRV_LOG(ERR, "VF path not active");
		rte_spinlock_unlock(&hv->vf_lock);
		return;
	}

	/* Stop incoming packets from arriving on VF */
	hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
	hv->vf_dev = NULL;

	/* Give back ownership */
	rte_eth_dev_owner_unset(vf_dev->data->port_id, hv->owner.id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF,
 * and the minimum number of queues and buffer sizes.
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;

	rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
				      info->max_rx_pktlen);
}
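
/* Fold the VF's capabilities into the reported device info, if attached */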
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
}
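
/*
 * Query link state via the VF's link_update op.
 * Note: the synthetic device is passed through, so the VF driver
 * fills in the synthetic port's link status.
 */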
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0;	/* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}
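
/*
 * Apply the synthetic device's configuration to the VF, enabling
 * link state change interrupts only when both sides support them.
 */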
static int _hn_vf_configure(struct rte_eth_dev *dev,
			    struct rte_eth_dev *vf_dev,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	uint16_t vf_port = vf_dev->data->port_id;
	int ret;

	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = _hn_vf_configure(dev, vf_dev, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
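
/* Use the VF's packet type capabilities when a VF is attached */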
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}
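
/* Start the VF port, if one is attached */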
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
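
/* Stop the VF port, if one is attached */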
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hv->vf_dev;				\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}
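
/*
 * Example: hn_vf_reset() below expands to taking hv->vf_lock,
 * calling rte_eth_dev_reset() on the VF port when one is attached,
 * and releasing the lock.
 */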

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}
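
/* Forward the multicast filter list to the VF, if attached */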
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct ether_addr *mc_addr_set,
		       uint16_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
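
/* Create the matching transmit queue on the VF, if attached */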
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
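
/* Release the VF transmit queue that shadows this queue id */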
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}
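
/* Create the matching receive queue on the VF, if attached */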
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

/* Release the VF receive queue that shadows this queue id */
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}
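
/* Read basic statistics from the VF, if attached */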
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}
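
/*
 * Return the VF's extended stat names with a "vf_" prefix.
 * Per the usual ethdev convention, a NULL names array just
 * returns the required count.
 */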
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;
	char tmp[RTE_ETH_XSTATS_NAME_SIZE];

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_get_names)
		count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}
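
/* Fetch the VF's extended stats; values correspond by index to the prefixed names above */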
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_get)
		count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
	rte_spinlock_unlock(&hv->vf_lock);

	return count;
}
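
/* Reset the VF's extended stats, if attached */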
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hv->vf_dev;
	if (vf_dev && vf_dev->dev_ops->xstats_reset)
		vf_dev->dev_ops->xstats_reset(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);
}