drivers/net/enic/enic_vf_representor.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc.  All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_pci.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

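/*
 * Rx/Tx burst handlers for the representor ports. They simply delegate to
 * the PF driver's burst functions; the rx_queue/tx_queue arguments point at
 * PF-owned vnic_rq/vnic_wq structures reserved for the representor (see the
 * queue setup handlers below).
 */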
static uint16_t enic_vf_recv_pkts(void *rx_queue,
                                  struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts)
{
        return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
                                  struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts)
{
        return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

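/*
 * The representor exposes a single Tx queue, backed by the PF WQ reserved
 * for it in enic_vf_representor_init() (pf_wq_idx).
 */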
static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf)
{
        struct enic_vf_representor *vf;
        struct vnic_wq *wq;
        struct enic *pf;
        int err;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        /* Only one queue now */
        if (queue_idx != 0)
                return -EINVAL;
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        wq = &pf->wq[vf->pf_wq_idx];
        wq->offloads = tx_conf->offloads |
                eth_dev->data->dev_conf.txmode.offloads;
        eth_dev->data->tx_queues[0] = (void *)wq;
        /* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
        err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
        if (err) {
                ENICPMD_LOG(ERR, "error in allocating wq\n");
                return err;
        }
        return 0;
}

static void enic_vf_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        enic_free_wq(txq);
}

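/*
 * Likewise, a single Rx queue, backed by the PF RQ pair (start-of-packet
 * and data) reserved for the representor (pf_rq_sop_idx/pf_rq_data_idx).
 */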
static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
{
        struct enic_vf_representor *vf;
        struct enic *pf;
        int ret;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        /* Only 1 queue now */
        if (queue_idx != 0)
                return -EINVAL;
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        eth_dev->data->rx_queues[queue_idx] =
                (void *)&pf->rq[vf->pf_rq_sop_idx];
        ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
                            rx_conf->rx_free_thresh);
        if (ret) {
                ENICPMD_LOG(ERR, "error in allocating rq\n");
                return ret;
        }
        return 0;
}

static void enic_vf_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        return 0;
}

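/*
 * Bring up the PF-owned WQ, RQs, and CQs reserved for this representor and
 * post mbufs to the RQs. This mirrors what enic_init_vnic_resources() and
 * enic_enable() do for the PF's own queues.
 */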
static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        struct vnic_rq *data_rq;
        int index, cq_idx;
        struct enic *pf;
        int ret;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        /* Remove all packet filters so no ingress packets go to VF.
         * When PF enables switchdev, it will ensure packet filters
         * are removed.  So, this is not technically needed.
         */
        ENICPMD_LOG(DEBUG, "Clear packet filters");
        ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
        if (ret) {
                ENICPMD_LOG(ERR, "Cannot clear packet filters");
                return ret;
        }

        /* Start WQ: see enic_init_vnic_resources */
        index = vf->pf_wq_idx;
        cq_idx = vf->pf_wq_cq_idx;
        vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
        vnic_cq_init(&pf->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0 /* interrupt_enable */,
                     0 /* cq_entry_enable */,
                     1 /* cq_message_enable */,
                     0 /* interrupt offset */,
                     (uint64_t)pf->wq[index].cqmsg_rz->iova);
        /* enic_start_wq */
        vnic_wq_enable(&pf->wq[index]);
        eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

        /* Start RQ: see enic_init_vnic_resources */
        index = vf->pf_rq_sop_idx;
        cq_idx = enic_cq_rq(vf->pf, index);
        vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
        data_rq = &pf->rq[vf->pf_rq_data_idx];
        if (data_rq->in_use)
                vnic_rq_init(data_rq, cq_idx, 1, 0);
        vnic_cq_init(&pf->cq[cq_idx],
                     0 /* flow_control_enable */,
                     1 /* color_enable */,
                     0 /* cq_head */,
                     0 /* cq_tail */,
                     1 /* cq_tail_color */,
                     0,
                     1 /* cq_entry_enable */,
                     0 /* cq_message_enable */,
                     0,
                     0 /* cq_message_addr */);
        /* enic_enable */
        ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
        if (ret) {
                ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n");
                return ret;
        }
        ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
        if (ret) {
                /* Release the allocated mbufs for the sop rq */
                enic_rxmbuf_queue_release(pf, &pf->rq[index]);
                ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n");
                return ret;
        }
        enic_start_rq(pf, vf->pf_rq_sop_idx);
        eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
        eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
}

static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        struct vnic_rq *rq;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
        /* Undo dev_start. Disable/clean WQ */
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
        vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
        vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
        /* Disable/clean RQ */
        rq = &pf->rq[vf->pf_rq_sop_idx];
        vnic_rq_disable(rq);
        vnic_rq_clean(rq, enic_free_rq_buf);
        rq = &pf->rq[vf->pf_rq_data_idx];
        if (rq->in_use) {
                vnic_rq_disable(rq);
                vnic_rq_clean(rq, enic_free_rq_buf);
        }
        vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
        eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
        eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
}

/*
 * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static void enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
        int wait_to_complete __rte_unused)
{
        struct enic_vf_representor *vf;
        struct rte_eth_link link;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        /*
         * Link status and speed are same as PF. Update PF status and then
         * copy it to VF.
         */
        enic_link_update(pf->rte_dev);
        rte_eth_linkstatus_get(pf->rte_dev, &link);
        rte_eth_linkstatus_set(eth_dev, &link);
        return 0;
}

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic_vf_representor *vf;
        struct vnic_stats *vs;
        int err;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        /* Get VF stats via PF */
        err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting stats\n");
                return err;
        }
        stats->ipackets = vs->rx.rx_frames_ok;
        stats->opackets = vs->tx.tx_frames_ok;
        stats->ibytes = vs->rx.rx_bytes_ok;
        stats->obytes = vs->tx.tx_bytes_ok;
        stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
        stats->oerrors = vs->tx.tx_errors;
        stats->imissed = vs->rx.rx_no_bufs;
        return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;
        int err;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        /* Ask PF to clear VF stats */
        err = vnic_dev_stats_clear(vf->enic.vdev);
        if (err)
                ENICPMD_LOG(ERR, "error in clearing stats\n");
        return err;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic_vf_representor *vf;
        struct enic *pf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        pf = vf->pf;
        device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
        device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        /* Max packet size is same as PF */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
        device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
        /* No offload capa, RSS, etc. until Tx/Rx handlers are added */
        device_info->rx_offload_capa = 0;
        device_info->tx_offload_capa = 0;
        device_info->switch_info.name = pf->rte_dev->device->name;
        device_info->switch_info.domain_id = vf->switch_domain_id;
        device_info->switch_info.port_id = vf->vf_id;
        return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
        /* switchdev: packet filters are ignored */
        if (vf->enic.switchdev_mode)
                return;
        /* Ask PF to apply filters on VF */
        vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
                1 /* bcast */, vf->promisc, vf->allmulti);
}

static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->promisc = 1;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->promisc = 0;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->allmulti = 1;
        set_vf_packet_filter(vf);
        return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        vf = eth_dev->data->dev_private;
        vf->allmulti = 0;
        set_vf_packet_filter(vf);
        return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
        .allmulticast_enable  = enic_vf_allmulticast_enable,
        .allmulticast_disable = enic_vf_allmulticast_disable,
        .dev_configure        = enic_vf_dev_configure,
        .dev_infos_get        = enic_vf_dev_infos_get,
        .dev_start            = enic_vf_dev_start,
        .dev_stop             = enic_vf_dev_stop,
        .dev_close            = enic_vf_dev_close,
        .link_update          = enic_vf_link_update,
        .promiscuous_enable   = enic_vf_promiscuous_enable,
        .promiscuous_disable  = enic_vf_promiscuous_disable,
        .stats_get            = enic_vf_stats_get,
        .stats_reset          = enic_vf_stats_reset,
        .rx_queue_setup       = enic_vf_dev_rx_queue_setup,
        .rx_queue_release     = enic_vf_dev_rx_queue_release,
        .tx_queue_setup       = enic_vf_dev_tx_queue_setup,
        .tx_queue_release     = enic_vf_dev_tx_queue_release,
};

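/*
 * Read the VF vNIC's MAC address and MTU via proxy devcmds on the
 * representor's vdev and copy them into the representor's eth_dev.
 */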
static int get_vf_config(struct enic_vf_representor *vf)
{
        struct vnic_enet_config *c;
        struct enic *pf;
        int switch_mtu;
        int err;

        c = &vf->config;
        pf = vf->pf;
        /* VF MAC */
        err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting MAC address\n");
                return err;
        }
        rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);

        /* VF MTU per its vNIC setting */
        err = vnic_dev_spec(vf->enic.vdev,
                            offsetof(struct vnic_enet_config, mtu),
                            sizeof(c->mtu), &c->mtu);
        if (err) {
                ENICPMD_LOG(ERR, "error in getting MTU\n");
                return err;
        }
        /*
         * Blade switch (fabric interconnect) port's MTU. Assume the kernel
         * enic driver runs on VF. That driver automatically adjusts its MTU
         * according to the switch MTU.
         */
        switch_mtu = vnic_dev_mtu(pf->vdev);
        vf->eth_dev->data->mtu = c->mtu;
        if (switch_mtu > c->mtu)
                vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
        return 0;
}

int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
        struct enic_vf_representor *vf, *params;
        struct rte_pci_device *pdev;
        struct enic *pf, *vf_enic;
        struct rte_pci_addr *addr;
        int ret;

        ENICPMD_FUNC_TRACE();
        params = init_params;
        vf = eth_dev->data->dev_private;
        vf->switch_domain_id = params->switch_domain_id;
        vf->vf_id = params->vf_id;
        vf->eth_dev = eth_dev;
        vf->pf = params->pf;
        vf->allmulti = 1;
        vf->promisc = 0;
        pf = vf->pf;
        vf->enic.switchdev_mode = pf->switchdev_mode;
        /* Only switchdev is supported now */
        RTE_ASSERT(vf->enic.switchdev_mode);
        /* Allocate WQ, RQ, CQ for the representor */
        vf->pf_wq_idx = vf_wq_idx(vf);
        vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
        vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
        vf->pf_rq_data_idx = vf_rq_data_idx(vf);
        /* Remove these assertions once queue allocation has an easy-to-use
         * allocator API instead of the index number calculations used
         * throughout the driver.
         */
        RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
        RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
                   vf->pf_rq_sop_idx);
        /* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
        pf->vf_required_wq++;
        pf->vf_required_rq += 2; /* sop and data */
        pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
        ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
                vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
                vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
        if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
                ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
                            " >= number of RQs (%u) in CIMC or UCSM",
                            pf->conf_cq_count, pf->conf_rq_count);
                return -EINVAL;
        }

        /* Check for non-existent VFs */
        pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
        if (vf->vf_id >= pdev->max_vfs) {
                ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
                            vf->vf_id, pdev->max_vfs);
                return -ENODEV;
        }

        eth_dev->device->driver = pf->rte_dev->device->driver;
        eth_dev->dev_ops = &enic_vf_representor_dev_ops;
        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR
                | RTE_ETH_DEV_CLOSE_REMOVE;
        eth_dev->data->representor_id = vf->vf_id;
        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
                sizeof(struct rte_ether_addr) *
                ENIC_UNICAST_PERFECT_FILTERS, 0);
        if (eth_dev->data->mac_addrs == NULL)
                return -ENOMEM;
        /* Use 1 RX queue and 1 TX queue for representor path */
        eth_dev->data->nb_rx_queues = 1;
        eth_dev->data->nb_tx_queues = 1;
        eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
        /* Initial link state copied from PF */
        eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
        /* Representor vdev to perform devcmd */
        vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
        if (vf->enic.vdev == NULL)
                return -ENOMEM;
        ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
        if (ret)
                return ret;
        /* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
        ret = get_vf_config(vf);
        if (ret)
                return ret;

        /*
         * Calculate VF BDF. The firmware ensures that PF BDF is always
         * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
         */
        vf->bdf = pdev->addr;
        vf->bdf.function += vf->vf_id + 1;

        /* Copy a few fields used by enic_fm_flow */
        vf_enic = &vf->enic;
        vf_enic->switch_domain_id = vf->switch_domain_id;
        vf_enic->flow_filter_mode = pf->flow_filter_mode;
        vf_enic->rte_dev = eth_dev;
        vf_enic->dev_data = eth_dev->data;
        LIST_INIT(&vf_enic->flows);
        LIST_INIT(&vf_enic->memzone_list);
        rte_spinlock_init(&vf_enic->memzone_list_lock);
        addr = &vf->bdf;
        snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                 addr->domain, addr->bus, addr->devid, addr->function);
        return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
        struct enic_vf_representor *vf;

        ENICPMD_FUNC_TRACE();
        vf = eth_dev->data->dev_private;
        vnic_dev_unregister(vf->enic.vdev);
        return 0;
}