drivers/net/netvsc/hn_vf.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
	const struct rte_ether_addr *mac = dev->data->mac_addrs;
	int i;

	RTE_ETH_FOREACH_DEV(i) {
		const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
		const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

		if (vf_dev == dev)
			continue;

		if (rte_is_same_ether_addr(mac, vf_mac))
			return i;
	}
	return -ENOENT;
}

/*
 * Attach the matching PCI VF device and record its port id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
	struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
	int ret;

	if (hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF already attached");
		return -EEXIST;
	}

	ret = rte_eth_dev_owner_get(port_id, &owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
		return ret;
	}

	if (owner.id != RTE_ETH_DEV_NO_OWNER) {
		PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
			    port_id, owner.name);
		return -EBUSY;
	}

	ret = rte_eth_dev_owner_set(port_id, &hv->owner);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
		return ret;
	}

	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
	hv->vf_port = port_id;
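	/* Make the new vf_port visible before the datapath switches to the VF */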
	rte_smp_wmb();

	return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
	int port, err;

	port = hn_vf_match(dev);
	if (port < 0) {
		PMD_DRV_LOG(NOTICE, "No matching MAC found");
		return port;
	}

	rte_spinlock_lock(&hv->vf_lock);
	err = hn_vf_attach(hv, port);

	if (err == 0) {
		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
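		/* Dummy interrupt handle (no fd) for LSC event reporting */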
		hv->vf_intr = (struct rte_intr_handle) {
			.fd = -1,
			.type = RTE_INTR_HANDLE_EXT,
		};
		dev->intr_handle = &hv->vf_intr;
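		/* Tell the host to switch the data path over to the VF */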
		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
	}
	rte_spinlock_unlock(&hv->vf_lock);

	return err;
}

/* Remove VF device */
static void hn_vf_remove(struct hn_data *hv)
{
	rte_spinlock_lock(&hv->vf_lock);

	if (!hn_vf_attached(hv)) {
		PMD_DRV_LOG(ERR, "VF path not active");
	} else {
		uint16_t vf_port = hv->vf_port;

		/* Stop incoming packets from arriving on VF */
		hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

		/* Stop transmission over VF */
		hv->vf_port = HN_INVALID_PORT;
		rte_smp_wmb();

		/* Give back ownership using the saved port id */
		rte_eth_dev_owner_unset(vf_port, hv->owner.id);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
		      const struct vmbus_chanpkt_hdr *hdr,
		      const void *data)
{
	struct hn_data *hv = dev->data->dev_private;
	const struct hn_nvs_vf_association *vf_assoc = data;

	if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
		PMD_DRV_LOG(ERR, "invalid vf association NVS");
		return;
	}

	PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
		    vf_assoc->serial,
		    vf_assoc->allocated ? "add to" : "remove from",
		    dev->data->port_id);

	hv->vf_present = vf_assoc->allocated;

	if (dev->state != RTE_ETH_DEV_ATTACHED)
		return;

	if (vf_assoc->allocated)
		hn_vf_add(dev, hv);
	else
		hn_vf_remove(hv);
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
		     const struct rte_eth_desc_lim *vf_lim)
{
	lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
	lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
	lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
	lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
	lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
				      lim->nb_mtu_seg_max);
}

/*
 * Merge the info from the VF and synthetic path:
 * use the default config of the VF, the intersection of the
 * offload capabilities, and the more restrictive queue and
 * buffer limits.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
			     struct rte_eth_dev_info *info)
{
	struct rte_eth_dev_info vf_info;
	int ret;

	ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
	if (ret != 0)
		return ret;

	info->speed_capa = vf_info.speed_capa;
	info->default_rxportconf = vf_info.default_rxportconf;
	info->default_txportconf = vf_info.default_txportconf;

	info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
				      info->max_rx_queues);
	info->rx_offload_capa &= vf_info.rx_offload_capa;
	info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
	info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

	info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
				      info->max_tx_queues);
	info->tx_offload_capa &= vf_info.tx_offload_capa;
	info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
	hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

	info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
				       info->min_rx_bufsize);
	info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
				       info->max_rx_pktlen);
	hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

	return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = hn_vf_info_merge(vf_dev, info);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->link_update)
		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
			   enum rte_eth_event_type event,
			   void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;

	if (event != RTE_ETH_EVENT_INTR_LSC)
		return 0;

	/* if link state has changed pass on */
	if (hn_dev_link_update(dev, 0) == 0)
		return 0; /* no change */

	return _rte_eth_dev_callback_process(dev,
					     RTE_ETH_EVENT_INTR_LSC,
					     NULL);
}

static int _hn_vf_configure(struct rte_eth_dev *dev,
			    uint16_t vf_port,
			    const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_conf vf_conf = *dev_conf;
	struct rte_eth_dev *vf_dev;
	int ret;

	vf_dev = &rte_eth_devices[vf_port];
	if (dev_conf->intr_conf.lsc &&
	    (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
		PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 1;
	} else {
		PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
			    vf_port);
		vf_conf.intr_conf.lsc = 0;
	}

	ret = rte_eth_dev_configure(vf_port,
				    dev->data->nb_rx_queues,
				    dev->data->nb_tx_queues,
				    &vf_conf);
	if (ret) {
		PMD_DRV_LOG(ERR,
			    "VF configuration failed: %d", ret);
	} else if (vf_conf.intr_conf.lsc) {
		ret = rte_eth_dev_callback_register(vf_port,
						    RTE_ETH_EVENT_INTR_LSC,
						    hn_vf_lsc_event, dev);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to register LSC callback for VF %u",
				    vf_port);
	}
	return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device
 */
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	if (hv->vf_port != HN_INVALID_PORT)
		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	const uint32_t *ptypes = NULL;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
	rte_spinlock_unlock(&hv->vf_lock);

	return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_start(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_stop(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		rte_eth_dev_stop(vf_dev->data->port_id);
	rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)				\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			func(vf_dev->data->port_id);		\
		rte_spinlock_unlock(&hv->vf_lock);		\
	}

/* If VF is present, then cascade configuration down */
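/* Note: ends with a return, so the calling wrapper returns the status */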
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)			\
	{							\
		struct hn_data *hv = (dev)->data->dev_private;	\
		struct rte_eth_dev *vf_dev;			\
		int ret = 0;					\
		rte_spinlock_lock(&hv->vf_lock);		\
		vf_dev = hn_get_vf_dev(hv);			\
		if (vf_dev)					\
			ret = func(vf_dev->data->port_id);	\
		rte_spinlock_unlock(&hv->vf_lock);		\
		return ret;					\
	}

void hn_vf_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	uint16_t vf_port;

	rte_spinlock_lock(&hv->vf_lock);
	vf_port = hv->vf_port;
	if (vf_port != HN_INVALID_PORT)
		rte_eth_dev_close(vf_port);

	hv->vf_port = HN_INVALID_PORT;
	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
	VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
						   mc_addr_set, nb_mc_addr);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, tx_conf);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
		void *subq = vf_dev->data->tx_queues[queue_id];

		(*vf_dev->dev_ops->tx_queue_release)(subq);
	}

	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
					     queue_idx, nb_desc,
					     socket_id, rx_conf, mp);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
	struct rte_eth_dev *vf_dev;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
		void *subq = vf_dev->data->rx_queues[queue_id];

		(*vf_dev->dev_ops->rx_queue_release)(subq);
	}
	rte_spinlock_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
	rte_spinlock_unlock(&hv->vf_lock);
	return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *names,
			   unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
						 names, n);
	rte_spinlock_unlock(&hv->vf_lock);

	/* add vf_ prefix to xstat names */
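	/* (when names is NULL, the caller only wants the count) */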
	if (names) {
		for (i = 0; i < count; i++) {
			char tmp[RTE_ETH_XSTATS_NAME_SIZE];

			snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
			strlcpy(names[i].name, tmp, sizeof(names[i].name));
		}
	}

	return count;
}

int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset,
		     unsigned int n)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int i, count = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		count = rte_eth_xstats_get(vf_dev->data->port_id,
					   xstats + offset, n - offset);
	rte_spinlock_unlock(&hv->vf_lock);

	/* Shift IDs so VF xstats follow the device's own xstats */
	if (count > 0) {
		for (i = 0; i < count; i++)
			xstats[i + offset].id += offset;
	}

	return count;
}

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev)
		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
	else
		ret = -EINVAL;
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct hn_data *hv = dev->data->dev_private;
	struct rte_eth_dev *vf_dev;
	int ret = 0;

	rte_spinlock_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev && vf_dev->dev_ops->reta_update)
		ret = vf_dev->dev_ops->reta_update(vf_dev,
						   reta_conf, reta_size);
	rte_spinlock_unlock(&hv->vf_lock);

	return ret;
}