/* dpdk.git: drivers/net/netvsc/hn_vf.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

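/*
 * Overview: on Hyper-V, the host can hot-plug an SR-IOV VF that is paired
 * with the synthetic netvsc device (accelerated networking). This file
 * finds the matching VF ethdev, takes ownership of it, mirrors the
 * synthetic device's configuration and queues onto it, and asks the host
 * VSP to switch the data path between the synthetic and VF channels.
 */
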
/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
        const struct rte_ether_addr *mac = dev->data->mac_addrs;
        int i;

        RTE_ETH_FOREACH_DEV(i) {
                const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
                const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;

                if (vf_dev == dev)
                        continue;

                if (rte_is_same_ether_addr(mac, vf_mac))
                        return i;
        }
        return -ENOENT;
}
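
/*
 * Note: the MAC address is the pairing key above because the host exposes
 * the VF with the same MAC as the synthetic NIC; the VF otherwise probes
 * as an ordinary PCI ethdev with no explicit link back to the vmbus device.
 */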


/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct rte_eth_dev *dev, struct hn_data *hv)
{
        struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
        int port, ret;

        if (hv->vf_ctx.vf_attached) {
                PMD_DRV_LOG(ERR, "VF already attached");
                return 0;
        }

        port = hn_vf_match(dev);
        if (port < 0) {
                PMD_DRV_LOG(NOTICE, "Couldn't find port for VF");
                return port;
        }

        PMD_DRV_LOG(NOTICE, "found matching VF port %d", port);
        ret = rte_eth_dev_owner_get(port, &owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Cannot get owner for port %d", port);
                return ret;
        }

        if (owner.id != RTE_ETH_DEV_NO_OWNER) {
                PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
                            port, owner.name);
                return -EBUSY;
        }

        ret = rte_eth_dev_owner_set(port, &hv->owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Cannot set owner for port %d", port);
                return ret;
        }

        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port);
        hv->vf_ctx.vf_attached = true;
        hv->vf_ctx.vf_port = port;
        return 0;
}
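
/*
 * Note: rte_eth_dev_owner_set() marks the VF port as owned by the netvsc
 * device, which hides it from the default RTE_ETH_FOREACH_DEV iteration
 * so applications do not drive the VF port behind the netvsc PMD's back.
 */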

static void hn_vf_remove(struct hn_data *hv);

static void hn_remove_delayed(void *args)
{
        struct hn_data *hv = args;
        uint16_t port_id = hv->vf_ctx.vf_port;
        struct rte_device *dev = rte_eth_devices[port_id].device;
        int ret;

        /* Tell VSP to switch data path to synthetic */
        hn_vf_remove(hv);

        PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
        rte_rwlock_write_lock(&hv->vf_lock);

        /* Give back ownership */
        ret = rte_eth_dev_owner_unset(port_id, hv->owner.id);
        if (ret)
                PMD_DRV_LOG(ERR, "rte_eth_dev_owner_unset failed ret=%d",
                            ret);
        hv->vf_ctx.vf_attached = false;

        ret = rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_INTR_RMV,
                                              hn_eth_rmv_event_callback, hv);
        if (ret)
                PMD_DRV_LOG(ERR,
                            "rte_eth_dev_callback_unregister failed ret=%d",
                            ret);

        /* Detach and release port_id from system */
        ret = rte_eth_dev_stop(port_id);
        if (ret)
                PMD_DRV_LOG(ERR, "rte_eth_dev_stop failed port_id=%u ret=%d",
                            port_id, ret);

        ret = rte_eth_dev_close(port_id);
        if (ret)
                PMD_DRV_LOG(ERR, "rte_eth_dev_close failed port_id=%u ret=%d",
                            port_id, ret);

        ret = rte_dev_remove(dev);
        if (ret)
                PMD_DRV_LOG(ERR, "rte_dev_remove failed ret=%d", ret);
        hv->vf_ctx.vf_state = vf_removed;

        rte_rwlock_write_unlock(&hv->vf_lock);
}

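/*
 * Note: the RMV event callback below does not tear the VF down directly;
 * a device cannot safely be stopped, closed and removed from inside its
 * own event callback, so the teardown is deferred to an EAL alarm that
 * fires shortly afterwards and runs hn_remove_delayed() instead.
 */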
int hn_eth_rmv_event_callback(uint16_t port_id,
                              enum rte_eth_event_type event __rte_unused,
                              void *cb_arg, void *out __rte_unused)
{
        struct hn_data *hv = cb_arg;

        PMD_DRV_LOG(NOTICE, "Removing VF portid %d", port_id);
        rte_eal_alarm_set(1, hn_remove_delayed, hv);

        return 0;
}

static int hn_setup_vf_queues(int port, struct rte_eth_dev *dev)
{
        struct hn_rx_queue *rx_queue;
        struct rte_eth_txq_info txinfo;
        struct rte_eth_rxq_info rxinfo;
        int i, ret = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                ret = rte_eth_tx_queue_info_get(dev->data->port_id, i, &txinfo);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "rte_eth_tx_queue_info_get failed ret=%d",
                                    ret);
                        return ret;
                }

                ret = rte_eth_tx_queue_setup(port, i, txinfo.nb_desc, 0,
                                             &txinfo.conf);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "rte_eth_tx_queue_setup failed ret=%d",
                                    ret);
                        return ret;
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                ret = rte_eth_rx_queue_info_get(dev->data->port_id, i, &rxinfo);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "rte_eth_rx_queue_info_get failed ret=%d",
                                    ret);
                        return ret;
                }

                rx_queue = dev->data->rx_queues[i];

                ret = rte_eth_rx_queue_setup(port, i, rxinfo.nb_desc, 0,
                                             &rxinfo.conf, rx_queue->mb_pool);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "rte_eth_rx_queue_setup failed ret=%d",
                                    ret);
                        return ret;
                }
        }

        return ret;
}
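
/*
 * Note: hn_setup_vf_queues() copies the synthetic device's per-queue
 * configuration (descriptor counts and queue conf) onto the VF port and
 * reuses the synthetic rx queues' mbuf pools, so both paths draw buffers
 * from the same mempools. Socket id 0 is passed for the VF queues here.
 */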

int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);

static void hn_vf_add_retry(void *args)
{
        struct rte_eth_dev *dev = args;
        struct hn_data *hv = dev->data->dev_private;

        hn_vf_add(dev, hv);
}
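
/*
 * Note: hn_vf_add_retry() runs from an EAL alarm armed in hn_vf_add();
 * when RNDIS has announced a VF but the matching ethdev has not been
 * probed yet, the pairing attempt is retried one second later.
 */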

int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf);

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
        int ret, port;

        if (!hv->vf_ctx.vf_vsp_reported || hv->vf_ctx.vf_vsc_switched)
                return 0;

        rte_rwlock_write_lock(&hv->vf_lock);

        ret = hn_vf_attach(dev, hv);
        if (ret) {
                PMD_DRV_LOG(NOTICE,
                            "RNDIS reports VF but device not found, retrying");
                rte_eal_alarm_set(1000000, hn_vf_add_retry, dev);
                goto exit;
        }

        port = hv->vf_ctx.vf_port;

        /* If the primary device has started, this is a VF hot add.
         * Configure and start VF device.
         */
        if (dev->data->dev_started) {
                if (rte_eth_devices[port].data->dev_started) {
                        PMD_DRV_LOG(ERR, "VF already started on hot add");
                        goto exit;
                }

                PMD_DRV_LOG(NOTICE, "configuring VF port %d", port);
                ret = hn_vf_configure(dev, &dev->data->dev_conf);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to configure VF port %d",
                                    port);
                        goto exit;
                }

                ret = hn_setup_vf_queues(port, dev);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Failed to configure VF queues port %d",
                                    port);
                        goto exit;
                }

                PMD_DRV_LOG(NOTICE, "Starting VF port %d", port);
                ret = rte_eth_dev_start(port);
                if (ret) {
                        PMD_DRV_LOG(ERR, "rte_eth_dev_start failed ret=%d",
                                    ret);
                        goto exit;
                }
                hv->vf_ctx.vf_state = vf_started;
        }

        ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
        if (ret == 0)
                hv->vf_ctx.vf_vsc_switched = true;

exit:
        rte_rwlock_write_unlock(&hv->vf_lock);
        return ret;
}
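
/*
 * Note: once the VF is attached, configured and started, the final step
 * above is an NVS message (hn_nvs_set_datapath) asking the host VSP to
 * steer receive traffic to the VF instead of the synthetic channel;
 * vf_vsc_switched records whether that switch actually took effect.
 */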

/* Switch data path back to the synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
        int ret;

        if (!hv->vf_ctx.vf_vsc_switched) {
                PMD_DRV_LOG(ERR, "VF path not active");
                return;
        }

        rte_rwlock_write_lock(&hv->vf_lock);
        if (!hv->vf_ctx.vf_vsc_switched) {
                PMD_DRV_LOG(ERR, "VF path not active");
        } else {
                /* Stop incoming packets from arriving on VF */
                ret = hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);
                if (ret == 0)
                        hv->vf_ctx.vf_vsc_switched = false;
        }
        rte_rwlock_write_unlock(&hv->vf_lock);
}
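
/*
 * Note: vf_vsc_switched is checked twice above, once without the lock and
 * again with it held, so the common "nothing to do" case avoids taking
 * the write lock while racing removals still resolve correctly under it.
 */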

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                      const struct vmbus_chanpkt_hdr *hdr,
                      const void *data)
{
        struct hn_data *hv = dev->data->dev_private;
        const struct hn_nvs_vf_association *vf_assoc = data;

        if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
                PMD_DRV_LOG(ERR, "invalid vf association NVS");
                return;
        }

        PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
                    vf_assoc->serial,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);

        hv->vf_ctx.vf_vsp_reported = vf_assoc->allocated;

        if (dev->state == RTE_ETH_DEV_ATTACHED) {
                if (vf_assoc->allocated)
                        hn_vf_add(dev, hv);
                else
                        hn_vf_remove(hv);
        }
}

static void
hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
                     const struct rte_eth_desc_lim *vf_lim)
{
        lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
        lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
        lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
        lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
        lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max,
                                      lim->nb_mtu_seg_max);
}
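
/*
 * Note: a merged descriptor limit must be valid for both devices, hence
 * the minimum of the maxima and the maximum of the minima and alignment;
 * e.g. merging nb_max 4096 (VF) with 2048 (synthetic) yields 2048.
 */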

/*
 * Merge the info from the VF and synthetic path:
 * use the default config of the VF, the minimum of the queue counts,
 * and the intersection of the offload capabilities.
 */
static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                             struct rte_eth_dev_info *info)
{
        struct rte_eth_dev_info vf_info;
        int ret;

        ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
        if (ret != 0)
                return ret;

        info->speed_capa = vf_info.speed_capa;
        info->default_rxportconf = vf_info.default_rxportconf;
        info->default_txportconf = vf_info.default_txportconf;

        info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
                                      info->max_rx_queues);
        info->rx_offload_capa &= vf_info.rx_offload_capa;
        info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
        info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

        info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
        hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);

        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
                                       info->max_rx_pktlen);
        hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);

        return 0;
}

int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = hn_vf_info_merge(vf_dev, info);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_conf vf_conf = *dev_conf;
        int ret = 0;

        /* link state interrupt does not matter here. */
        vf_conf.intr_conf.lsc = 0;

        /* need to monitor removal event */
        vf_conf.intr_conf.rmv = 1;

        if (hv->vf_ctx.vf_attached) {
                ret = rte_eth_dev_callback_register(hv->vf_ctx.vf_port,
                                                    RTE_ETH_EVENT_INTR_RMV,
                                                    hn_eth_rmv_event_callback,
                                                    hv);
                if (ret) {
                        PMD_DRV_LOG(ERR,
                                    "Registering callback failed for vf port %d ret %d",
                                    hv->vf_ctx.vf_port, ret);
                        return ret;
                }

                ret = rte_eth_dev_configure(hv->vf_ctx.vf_port,
                                            dev->data->nb_rx_queues,
                                            dev->data->nb_tx_queues,
                                            &vf_conf);
                if (ret) {
                        PMD_DRV_LOG(ERR, "VF configuration failed: %d", ret);

                        rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
                                                        RTE_ETH_EVENT_INTR_RMV,
                                                        hn_eth_rmv_event_callback,
                                                        hv);

                        return ret;
                }

                hv->vf_ctx.vf_state = vf_configured;
        }

        return ret;
}

/* Configure VF if present.
 * VF device will have the same number of queues as the synthetic device
 */
int hn_vf_configure_locked(struct rte_eth_dev *dev,
                           const struct rte_eth_conf *dev_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        int ret = 0;

        rte_rwlock_write_lock(&hv->vf_lock);
        ret = hn_vf_configure(dev, dev_conf);
        rte_rwlock_write_unlock(&hv->vf_lock);

        return ret;
}

const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ptypes;
}

int hn_vf_start(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

int hn_vf_stop(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev) {
                ret = rte_eth_dev_stop(vf_dev->data->port_id);
                if (ret != 0)
                        PMD_DRV_LOG(ERR, "Failed to stop device on port %u",
                                    vf_dev->data->port_id);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)                               \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
        }

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)                    \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                int ret = 0;                                    \
                rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        ret = func(vf_dev->data->port_id);      \
                rte_rwlock_read_unlock(&hv->vf_lock);           \
                return ret;                                     \
        }
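
/*
 * For illustration, hn_vf_stats_reset() below effectively expands to:
 *
 *      int hn_vf_stats_reset(struct rte_eth_dev *dev)
 *      {
 *              struct hn_data *hv = dev->data->dev_private;
 *              struct rte_eth_dev *vf_dev;
 *              int ret = 0;
 *
 *              rte_rwlock_read_lock(&hv->vf_lock);
 *              vf_dev = hn_get_vf_dev(hv);
 *              if (vf_dev)
 *                      ret = rte_eth_stats_reset(vf_dev->data->port_id);
 *              rte_rwlock_read_unlock(&hv->vf_lock);
 *              return ret;
 *      }
 *
 * i.e. each wrapper forwards the call to the owned VF port, if any,
 * under the read side of vf_lock.
 */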

void hn_vf_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

int hn_vf_close(struct rte_eth_dev *dev)
{
        int ret = 0;
        struct hn_data *hv = dev->data->dev_private;

        rte_eal_alarm_cancel(hn_vf_add_retry, dev);

        rte_rwlock_read_lock(&hv->vf_lock);
        if (hv->vf_ctx.vf_attached) {
                rte_eth_dev_callback_unregister(hv->vf_ctx.vf_port,
                                                RTE_ETH_EVENT_INTR_RMV,
                                                hn_eth_rmv_event_callback,
                                                hv);
                rte_eal_alarm_cancel(hn_remove_delayed, hv);
                ret = rte_eth_dev_close(hv->vf_ctx.vf_port);
                hv->vf_ctx.vf_attached = false;
        }
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

int hn_vf_stats_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
}

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
}

int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
}

int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
}

int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
}

int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
                        struct rte_ether_addr *mc_addr_set,
                        uint32_t nb_mc_addr)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];

                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }

        rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mp)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];

                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
        rte_rwlock_read_unlock(&hv->vf_lock);
}

int hn_vf_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
        rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
}

int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
                           struct rte_eth_xstat_name *names,
                           unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get_names(vf_dev->data->port_id,
                                                 names, n);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* add vf_ prefix to xstat names */
        if (names) {
                for (i = 0; i < count; i++) {
                        char tmp[RTE_ETH_XSTATS_NAME_SIZE];

                        snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
                        strlcpy(names[i].name, tmp, sizeof(names[i].name));
                }
        }

        return count;
}
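
/*
 * Note: the "vf_" prefix keeps the VF's xstat names distinct from the
 * synthetic device's own xstats when both sets are reported together.
 */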

int hn_vf_xstats_get(struct rte_eth_dev *dev,
                     struct rte_eth_xstat *xstats,
                     unsigned int offset,
                     unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                count = rte_eth_xstats_get(vf_dev->data->port_id,
                                           xstats + offset, n - offset);
        rte_rwlock_read_unlock(&hv->vf_lock);

        /* Offset the IDs for the VF stats */
        if (count > 0) {
                for (i = 0; i < count; i++)
                        xstats[i + offset].id += offset;
        }

        return count;
}
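
/*
 * Note: the caller reserves ids [0, offset) for the synthetic device's
 * xstats; the VF's entries are written at xstats[offset..] and their ids
 * shifted by the same amount so the combined table stays consistent.
 */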

int hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_xstats_reset(vf_dev->data->port_id);
        else
                ret = -EINVAL;
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rss_hash_update)
                ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}

int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_reta_entry64 *reta_conf,
                           uint16_t reta_size)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->reta_update)
                ret = vf_dev->dev_ops->reta_update(vf_dev,
                                                   reta_conf, reta_size);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return ret;
}