d29eee762760f327b0b96644b23ce807a36465e7
dpdk.git: drivers/net/netvsc/hn_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Cannot find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Cannot set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	err = hn_vf_attach(hv, port);
	if (err == 0)
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);

	return err;
}

/* Remove VF device from synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Give back ownership */
		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
	}
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	rte_rwlock_write_lock(&hv->vf_lock);
	hv->vf_present = vf_assoc->allocated;

	if (dev->state == RTE_ETH_DEV_ATTACHED) {
		if (vf_assoc->allocated)
			hn_vf_add(dev, hv);
		else
			hn_vf_remove(hv);
	}
	rte_rwlock_write_unlock(&hv->vf_lock);
}

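/* Keep the tighter of the VF and synthetic descriptor limits */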
static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max, lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF and the more restrictive
 * of the two paths' queue counts, offloads and buffer limits.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			    struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

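/* Get device info; merge in VF capabilities and limits if a VF is attached */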
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_conf vf_conf = *dev_conf;
	int ret = 0;

	/* link state interrupt does not matter here. */
	vf_conf.intr_conf.lsc = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT) {
		ret = rte_eth_dev_configure(hv->vf_port,
					    dev->data->nb_rx_queues,
					    dev->data->nb_tx_queues,
					    &vf_conf);
		if (ret != 0)
			PMD_DRV_LOG(ERR,
				    "VF configuration failed: %d", ret);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

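/* Report packet types supported by the VF, if one is attached */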
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ptypes;
}

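/* Start the VF device, if one is attached */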
int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

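/* Stop the VF device, if one is attached */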
void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_rwlock_read_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_rwlock_read_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_rwlock_read_unlock(&hv->vf_lock);		\
		return ret;					\
	}

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

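/* Close and release the VF port, if one was attached */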
int hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		ret = rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

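/* Forward the multicast address list to the VF, if attached */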
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

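/*
 * Set up the matching transmit queue on the VF so both paths
 * use the same queue index and descriptor count.
 */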
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

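/* Release the corresponding transmit queue on the VF, if attached */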
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_rwlock_read_unlock(&hv->vf_lock);
}

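/* Set up the matching receive queue on the VF, reusing the same mempool */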
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

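/* Release the corresponding receive queue on the VF, if attached */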
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}

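/* Get basic statistics from the VF, if attached */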
int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_rwlock_read_unlock(&hv->vf_lock);
	return ret;
}

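/* Get VF extended statistic names, prefixed with "vf_" to distinguish them */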
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

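/* Get VF extended statistics, stored at the caller-supplied offset into xstats */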
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_rwlock_read_unlock(&hv->vf_lock);

	/* Offset IDs for VF stats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

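/* Reset VF extended statistics; returns -EINVAL when no VF is attached */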
int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

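/* Pass RSS hash configuration through to the VF driver, if it supports it */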
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}

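/* Pass RSS redirection table updates through to the VF driver, if supported */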
int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_rwlock_read_unlock(&hv->vf_lock);

	return ret;
}