/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"
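/*
 * The netvsc device pairs a synthetic vmbus NIC with an optional
 * SR-IOV VF that shares its MAC address. The helpers below locate,
 * attach and detach that VF and cascade ethdev operations to it;
 * hv->vf_lock serializes control-path access to the VF port.
 */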
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
        const struct rte_ether_addr *mac = dev->data->mac_addrs;
        int i;

        RTE_ETH_FOREACH_DEV(i) {
                const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
                const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

                /* Skip the synthetic device itself */
                if (vf_dev == dev)
                        continue;

                if (rte_is_same_ether_addr(mac, vf_mac))
                        return i;
        }
        return -ENOENT;
}
/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
        int ret;

        if (hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF already attached");
                return -EEXIST;
        }

        ret = rte_eth_dev_owner_get(port_id, &owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
                return ret;
        }

        if (owner.id != RTE_ETH_DEV_NO_OWNER) {
                PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
                            port_id, owner.name);
                return -EBUSY;
        }

        ret = rte_eth_dev_owner_set(port_id, &hv->owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
                return ret;
        }

        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
        hv->vf_port = port_id;
        return 0;
}
/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
        int port, err;

        port = hn_vf_match(dev);
        if (port < 0) {
                PMD_DRV_LOG(NOTICE, "No matching MAC found");
                return port;
        }

        err = hn_vf_attach(hv, port);
        if (err == 0)
                /* Move the data path over to the VF */
                hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);

        return err;
}
/* Remove new VF device */
static void hn_vf_remove(struct hn_data *hv)
{
        if (!hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF path not active");
                return;
        }

        /* Stop incoming packets from arriving on VF */
        hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

        /* Give back ownership */
        rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

        /* Stop transmission over VF */
        hv->vf_port = HN_INVALID_PORT;
}
/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                      const struct vmbus_chanpkt_hdr *hdr,
                      const void *data)
{
        struct hn_data *hv = dev->data->dev_private;
        const struct hn_nvs_vf_association *vf_assoc = data;

        if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
                PMD_DRV_LOG(ERR, "invalid vf association NVS");
                return;
        }

        PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
                    vf_assoc->serial,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);

        rte_rwlock_write_lock(&hv->vf_lock);
        hv->vf_present = vf_assoc->allocated;

        if (dev->state == RTE_ETH_DEV_ATTACHED) {
                if (vf_assoc->allocated)
                        hn_vf_add(dev, hv);
                else
                        hn_vf_remove(hv);
        }
        rte_rwlock_write_unlock(&hv->vf_lock);
}
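/*
 * Merge descriptor limits from the VF and synthetic paths: take the
 * smaller of the maxima and the larger of the minima so the result is
 * valid for both devices.
 */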
static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
                     const struct rte_eth_desc_lim *vf_lim)
{
        lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
        lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
        lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
        lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
        lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
                                      lim->nb_mtu_seg_max);
}
/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                            struct rte_eth_dev_info *info)
{
        struct rte_eth_dev_info vf_info;
        int ret;

        ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
        if (ret != 0)
                return ret;

        info->speed_capa = vf_info.speed_capa;
        info->default_rxportconf = vf_info.default_rxportconf;
        info->default_txportconf = vf_info.default_txportconf;

        info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
                                      info->max_rx_queues);
        info->rx_offload_capa &= vf_info.rx_offload_capa;
        info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
        info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

        info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
        hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen = RTE_MAX(vf_info.max_rx_pktlen,
                                      info->max_rx_pktlen);
        hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

        return 0;
}
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = hn_vf_info_merge(vf_dev, info);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_conf vf_conf = *dev_conf;
        int ret = 0;

        /* link state interrupt does not matter here. */
        vf_conf.intr_conf.lsc = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        if (hv->vf_port != HN_INVALID_PORT) {
                ret = rte_eth_dev_configure(hv->vf_port,
                                            dev->data->nb_rx_queues,
                                            dev->data->nb_tx_queues,
                                            &vf_conf);
                if (ret != 0)
                        PMD_DRV_LOG(ERR,
                                    "VF configuration failed: %d", ret);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
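/* Report the packet types supported by the VF, if one is attached */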
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ptypes;
}
int hn_vf_start(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
void hn_vf_stop(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                rte_eth_dev_stop(vf_dev->data->port_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
}
/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)                               \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
        }
/* If VF is present, then cascade the call down and return its status */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)                    \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                int ret = 0;                                    \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        ret = func(vf_dev->data->port_id);      \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
                return ret;                                     \
        }
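/*
 * Each wrapper below expands to a locked VF lookup followed by the
 * given rte_eth_* call on the VF's port id; e.g. hn_vf_reset() ends up
 * invoking rte_eth_dev_reset() on the VF port when one is attached.
 */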
void hn_vf_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}
void hn_vf_close(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        uint16_t vf_port;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_port = hv->vf_port;
        if (vf_port != HN_INVALID_PORT)
                rte_eth_dev_close(vf_port);

        hv->vf_port = HN_INVALID_PORT;
        rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
                       struct rte_ether_addr *mc_addr_set,
                       uint16_t nb_mc_addr)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
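/*
 * There is no public rte_ethdev call to release an individual queue,
 * so the release is forwarded to the VF through its dev_ops directly.
 */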
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];

                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mp)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];

                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);
}
int hn_vf_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
                           struct rte_eth_xstat_name *names,
                           unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get_names(vf_dev->data->port_id,
                                                 names, n);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* add vf_ prefix to xstat names */
        if (names) {
                for (i = 0; i < count; i++) {
                        char tmp[RTE_ETH_XSTATS_NAME_SIZE];

                        snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
                        strlcpy(names[i].name, tmp, sizeof(names[i].name));
                }
        }

        return count;
}
int hn_vf_xstats_get(struct rte_eth_dev *dev,
                     struct rte_eth_xstat *xstats,
                     unsigned int offset,
                     unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get(vf_dev->data->port_id,
                                           xstats + offset, n - offset);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* Offset id's for VF stats */
        for (i = 0; i < count; i++)
                xstats[i + offset].id += offset;

        return count;
}
int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_xstats_reset(vf_dev->data->port_id);
        else
                ret = -EINVAL;
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}
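/*
 * RSS hash and indirection-table updates go through the VF's dev_ops
 * so its configuration stays in step with the synthetic device.
 */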
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rss_hash_update)
                ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}
int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_reta_entry64 *reta_conf,
                           uint16_t reta_size)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->reta_update)
                ret = vf_dev->dev_ops->reta_update(vf_dev,
                                                   reta_conf, reta_size);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}