net/netvsc: fix warning when VF is removed
drivers/net/netvsc/hn_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/*
 * Search for VF with matching MAC address, return port id.
 * On Hyper-V, the VF and the synthetic device are assigned the
 * same MAC address, which is how the two are paired.
 */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
        const struct rte_ether_addr *mac = dev->data->mac_addrs;
        int i;

        RTE_ETH_FOREACH_DEV(i) {
                const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
                const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

                if (vf_dev == dev)
                        continue;

                if (rte_is_same_ether_addr(mac, vf_mac))
                        return i;
        }
        return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
        int ret;

        if (hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF already attached");
                return -EEXIST;
        }

        ret = rte_eth_dev_owner_get(port_id, &owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Cannot find owner for port %d", port_id);
                return ret;
        }

        if (owner.id != RTE_ETH_DEV_NO_OWNER) {
                PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
                            port_id, owner.name);
                return -EBUSY;
        }

        ret = rte_eth_dev_owner_set(port_id, &hv->owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Cannot set owner for port %d", port_id);
                return ret;
        }

        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
        hv->vf_port = port_id;
        return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
        int port, err;

        port = hn_vf_match(dev);
        if (port < 0) {
                PMD_DRV_LOG(NOTICE, "No matching MAC found");
                return port;
        }

        err = hn_vf_attach(hv, port);
        if (err == 0) {
                dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
                hv->vf_intr = (struct rte_intr_handle) {
                        .fd = -1,
                        .type = RTE_INTR_HANDLE_EXT,
                };
                dev->intr_handle = &hv->vf_intr;
                hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
        }

        return err;
}

/* Remove VF device from the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
        if (!hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF path not active");
        } else {
                /* Stop incoming packets from arriving on VF */
                hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

                /* Give back ownership */
                rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);

                /* Stop transmission over VF */
                hv->vf_port = HN_INVALID_PORT;
        }
}

/*
 * Handle VF association message from the host.
 * The host sends this NVS message when an SR-IOV VF is
 * hot-added to or removed from the guest.
 */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                      const struct vmbus_chanpkt_hdr *hdr,
                      const void *data)
{
        struct hn_data *hv = dev->data->dev_private;
        const struct hn_nvs_vf_association *vf_assoc = data;

        if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
                PMD_DRV_LOG(ERR, "invalid vf association NVS");
                return;
        }

        PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
                    vf_assoc->serial,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);

        rte_rwlock_write_lock(&hv->vf_lock);
        hv->vf_present = vf_assoc->allocated;

        if (dev->state == RTE_ETH_DEV_ATTACHED) {
                if (vf_assoc->allocated)
                        hn_vf_add(dev, hv);
                else
                        hn_vf_remove(hv);
        }
        rte_rwlock_write_unlock(&hv->vf_lock);
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
                     const struct rte_eth_desc_lim *vf_lim)
{
        lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
        lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
        lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
        lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
        lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
                                      lim->nb_mtu_seg_max);
}
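
/*
 * For example (hypothetical values): if the synthetic path reports
 * nb_max = 4096 and the VF reports nb_max = 2048, the merged limit is
 * RTE_MIN(2048, 4096) = 2048. Minima and alignment go the other way:
 * RTE_MAX picks the stricter (larger) requirement of the two paths.
 */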

/*
 * Merge the info from the VF and synthetic paths.
 * Use the default config of the VF, the minimum of the two paths'
 * queue counts, and the intersection of their offload capabilities.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                            struct rte_eth_dev_info *info)
{
        struct rte_eth_dev_info vf_info;
        int ret;

        ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
        if (ret != 0)
                return ret;

        info->speed_capa = vf_info.speed_capa;
        info->default_rxportconf = vf_info.default_rxportconf;
        info->default_txportconf = vf_info.default_txportconf;

        info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
                                      info->max_rx_queues);
        info->rx_offload_capa &= vf_info.rx_offload_capa;
        info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
        info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

        info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
        hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
                                       info->max_rx_pktlen);
        hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

        return 0;
}

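/*
 * Report device info for the synthetic device, merged with the
 * limits of the VF device when one is attached.
 */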
int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = hn_vf_info_merge(vf_dev, info);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

int hn_vf_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->link_update)
                ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

/* Called when the VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
                           enum rte_eth_event_type event,
                           void *cb_arg, void *out __rte_unused)
{
        struct rte_eth_dev *dev = cb_arg;

        if (event != RTE_ETH_EVENT_INTR_LSC)
                return 0;

        /* If link state has changed, pass the event on */
        if (hn_dev_link_update(dev, 0) == 0)
                return 0; /* no change */

        return _rte_eth_dev_callback_process(dev,
                                             RTE_ETH_EVENT_INTR_LSC,
                                             NULL);
}

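/*
 * Configure the VF with the same number of queues as the synthetic
 * device, enabling link state interrupts on the VF only when both
 * the application requested them and the VF supports them.
 */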
static int _hn_vf_configure(struct rte_eth_dev *dev,
                            uint16_t vf_port,
                            const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_conf vf_conf = *dev_conf;
        struct rte_eth_dev *vf_dev;
        int ret;

        vf_dev = &rte_eth_devices[vf_port];
        if (dev_conf->intr_conf.lsc &&
            (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
                PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
                            vf_port);
                vf_conf.intr_conf.lsc = 1;
        } else {
                PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
                            vf_port);
                vf_conf.intr_conf.lsc = 0;
        }

        ret = rte_eth_dev_configure(vf_port,
                                    dev->data->nb_rx_queues,
                                    dev->data->nb_tx_queues,
                                    &vf_conf);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "VF configuration failed: %d", ret);
        } else if (vf_conf.intr_conf.lsc) {
                /* callback_register takes an event type, not a dev flag */
                ret = rte_eth_dev_callback_register(vf_port,
                                                    RTE_ETH_EVENT_INTR_LSC,
                                                    hn_vf_lsc_event, dev);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to register LSC callback for VF %u",
                                    vf_port);
        }
        return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        if (hv->vf_port != HN_INVALID_PORT)
                ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

void hn_vf_stop(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                rte_eth_dev_stop(vf_dev->data->port_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade the operation down to it */
#define VF_ETHDEV_FUNC(dev, func)                               \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
        }

/* If VF is present, then cascade the operation down and return its status */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)                    \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                int ret = 0;                                    \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        ret = func(vf_dev->data->port_id);      \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
                return ret;                                     \
        }
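
/*
 * For illustration, VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset)
 * in hn_vf_stats_reset() below expands to roughly:
 *
 *      struct hn_data *hv = dev->data->dev_private;
 *      struct rte_eth_dev *vf_dev;
 *      int ret = 0;
 *      rte_rwlock_read_lock(&hv->vf_lock);
 *      vf_dev = hn_get_vf_dev(hv);
 *      if (vf_dev)
 *              ret = rte_eth_stats_reset(vf_dev->data->port_id);
 *      rte_rwlock_read_unlock(&hv->vf_lock);
 *      return ret;
 *
 * VF_ETHDEV_FUNC expands the same way, minus the return value.
 */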

void hn_vf_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        uint16_t vf_port;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_port = hv->vf_port;
        if (vf_port != HN_INVALID_PORT)
                rte_eth_dev_close(vf_port);

        hv->vf_port = HN_INVALID_PORT;
        rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
                       struct rte_ether_addr *mc_addr_set,
                       uint32_t nb_mc_addr)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];

                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }

        rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mp)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];

                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

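/*
 * Fetch the VF's xstat names and prefix each with "vf_" so they are
 * distinguishable from the synthetic device's own stats. For example,
 * a VF stat named "rx_good_packets" (names vary by VF PMD) is
 * reported as "vf_rx_good_packets".
 */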
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
                           struct rte_eth_xstat_name *names,
                           unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get_names(vf_dev->data->port_id,
                                                 names, n);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* add vf_ prefix to xstat names */
        if (names) {
                for (i = 0; i < count; i++) {
                        char tmp[RTE_ETH_XSTATS_NAME_SIZE];

                        snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
                        strlcpy(names[i].name, tmp, sizeof(names[i].name));
                }
        }

        return count;
}

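/*
 * Fetch the VF's xstats into xstats[offset..], shifting each stat id
 * by offset. The caller passes the offset (typically the number of
 * entries already used by the synthetic device), so e.g. with
 * offset = 10 the VF's stat id 0 becomes id 10 in the combined table.
 */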
int hn_vf_xstats_get(struct rte_eth_dev *dev,
                     struct rte_eth_xstat *xstats,
                     unsigned int offset,
                     unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get(vf_dev->data->port_id,
                                           xstats + offset, n - offset);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* Offset the ids of the VF stats */
        if (count > 0) {
                for (i = 0; i < count; i++)
                        xstats[i + offset].id += offset;
        }

        return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_xstats_reset(vf_dev->data->port_id);
        else
                ret = -EINVAL;
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rss_hash_update)
                ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_reta_entry64 *reta_conf,
                           uint16_t reta_size)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->reta_update)
                ret = vf_dev->dev_ops->reta_update(vf_dev,
                                                   reta_conf, reta_size);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}