/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2019 Cisco Systems, Inc. All rights reserved.
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_flow_driver.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"

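/*
 * Rx/Tx burst handlers for the representor ports. The representor data path
 * reuses the PF's enic_recv_pkts/enic_xmit_pkts on the WQ/RQ that the PF
 * reserves for each VF representor.
 */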
static uint16_t enic_vf_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	return enic_recv_pkts(rx_queue, rx_pkts, nb_pkts);
}

static uint16_t enic_vf_xmit_pkts(void *tx_queue,
				  struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	return enic_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
}

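/*
 * Queue setup: the representor exposes a single Tx and a single Rx queue;
 * each maps onto a PF WQ/RQ index reserved for this VF at init time
 * (vf->pf_wq_idx, vf->pf_rq_sop_idx).
 */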
static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	struct enic_vf_representor *vf;
	struct vnic_wq *wq;
	struct enic *pf;
	int err;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only one queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	wq = &pf->wq[vf->pf_wq_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[0] = (void *)wq;
	/* Pass vf not pf because of cq index calculation. See enic_alloc_wq */
	err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc);
	if (err) {
		ENICPMD_LOG(ERR, "error in allocating wq\n");
		return err;
	}
	return 0;
}

static void enic_vf_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}

static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	struct enic_vf_representor *vf;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	/* Only 1 queue now */
	if (queue_idx != 0)
		return -EINVAL;
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&pf->rq[vf->pf_rq_sop_idx];
	ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		ENICPMD_LOG(ERR, "error in allocating rq\n");
		return ret;
	}
	return 0;
}

static void enic_vf_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}

static int enic_vf_dev_configure(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

static int
setup_rep_vf_fwd(struct enic_vf_representor *vf)
{
	int ret;

	ENICPMD_FUNC_TRACE();
	/* Representor -> VF rule
	 * Egress packets from this representor are on the representor's WQ.
	 * So, loop back that WQ to VF.
	 */
	ret = enic_fm_add_rep2vf_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create representor->VF flow");
		return ret;
	}
	/* VF -> representor rule
	 * Packets from VF loop back to the representor, unless they match
	 * user-added flows.
	 */
	ret = enic_fm_add_vf2rep_flow(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot create VF->representor flow");
		return ret;
	}
	return 0;
}

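/*
 * dev_start: initialize the representor's flowman instance, install the
 * implicit representor<->VF forwarding flows, then bring up the dedicated
 * PF WQ/RQ/CQ (mirroring what enic_init_vnic_resources does on the PF).
 */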
static int enic_vf_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *data_rq;
	int index, cq_idx;
	struct enic *pf;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/* Get representor flowman for flow API and representor path */
	ret = enic_fm_init(&vf->enic);
	if (ret)
		return ret;
	/* Set up implicit flow rules to forward between representor and VF */
	ret = setup_rep_vf_fwd(vf);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot set up representor-VF flows");
		return ret;
	}
	/* Remove all packet filters so no ingress packets go to VF.
	 * When PF enables switchdev, it will ensure packet filters
	 * are removed. So, this is not technically needed.
	 */
	ENICPMD_LOG(DEBUG, "Clear packet filters");
	ret = vnic_dev_packet_filter(vf->enic.vdev, 0, 0, 0, 0, 0);
	if (ret) {
		ENICPMD_LOG(ERR, "Cannot clear packet filters");
		return ret;
	}

	/* Start WQ: see enic_init_vnic_resources */
	index = vf->pf_wq_idx;
	cq_idx = vf->pf_wq_cq_idx;
	vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)pf->wq[index].cqmsg_rz->iova);
	vnic_wq_enable(&pf->wq[index]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;

	/* Start RQ: see enic_init_vnic_resources */
	index = vf->pf_rq_sop_idx;
	cq_idx = enic_cq_rq(vf->pf, index);
	vnic_rq_init(&pf->rq[index], cq_idx, 1, 0);
	data_rq = &pf->rq[vf->pf_rq_data_idx];
	if (data_rq->in_use)
		vnic_rq_init(data_rq, cq_idx, 1, 0);
	vnic_cq_init(&pf->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);
	ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]);
	if (ret) {
		ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n");
		return ret;
	}
	ret = enic_alloc_rx_queue_mbufs(pf, data_rq);
	if (ret) {
		/* Release the allocated mbufs for the sop rq */
		enic_rxmbuf_queue_release(pf, &pf->rq[index]);
		ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n");
		return ret;
	}
	enic_start_rq(pf, vf->pf_rq_sop_idx);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

static void enic_vf_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	struct vnic_rq *rq;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	/* Undo dev_start. Disable/clean WQ */
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);
	vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);
	vnic_cq_clean(&pf->cq[vf->pf_wq_cq_idx]);
	/* Disable/clean RQ */
	rq = &pf->rq[vf->pf_rq_sop_idx];
	vnic_rq_disable(rq);
	vnic_rq_clean(rq, enic_free_rq_buf);
	rq = &pf->rq[vf->pf_rq_data_idx];
	if (rq->in_use) {
		vnic_rq_disable(rq);
		vnic_rq_clean(rq, enic_free_rq_buf);
	}
	vnic_cq_clean(&pf->cq[enic_cq_rq(vf->pf, vf->pf_rq_sop_idx)]);
	eth_dev->data->tx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	eth_dev->data->rx_queue_state[0] = RTE_ETH_QUEUE_STATE_STOPPED;
	/* Clean up representor flowman */
	enic_fm_destroy(&vf->enic);
}

/*
 * "close" is a no-op for now and solely exists so that rte_eth_dev_close()
 * can finish its own cleanup without errors.
 */
static int enic_vf_dev_close(struct rte_eth_dev *eth_dev __rte_unused)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	return 0;
}

static int
adjust_flow_attr(const struct rte_flow_attr *attrs,
		 struct rte_flow_attr *vf_attrs,
		 struct rte_flow_error *error)
{
	if (!attrs) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "no attribute specified");
	}
	/*
	 * Swap ingress and egress as the firmware view of direction
	 * is the opposite of the representor.
	 */
	*vf_attrs = *attrs;
	if (attrs->ingress && !attrs->egress) {
		vf_attrs->ingress = 0;
		vf_attrs->egress = 1;
		return 0;
	}
	return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			"representor only supports ingress");
}

static int
enic_vf_flow_validate(struct rte_eth_dev *dev,
		      const struct rte_flow_attr *attrs,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;
	int ret;

	ret = adjust_flow_attr(attrs, &vf_attrs, error);
	if (ret)
		return ret;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.validate(dev, attrs, pattern, actions, error);
}

static struct rte_flow *
enic_vf_flow_create(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attrs,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_flow_attr vf_attrs;

	if (adjust_flow_attr(attrs, &vf_attrs, error))
		return NULL;
	attrs = &vf_attrs;
	return enic_fm_flow_ops.create(dev, attrs, pattern, actions, error);
}

static int
enic_vf_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	return enic_fm_flow_ops.destroy(dev, flow, error);
}

static int
enic_vf_flow_query(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   const struct rte_flow_action *actions,
		   void *data,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.query(dev, flow, actions, data, error);
}

static int
enic_vf_flow_flush(struct rte_eth_dev *dev,
		   struct rte_flow_error *error)
{
	return enic_fm_flow_ops.flush(dev, error);
}

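/*
 * rte_flow ops exposed to applications through filter_ctrl. The handlers
 * adjust the flow attribute direction and then delegate to the PF flowman
 * ops (enic_fm_flow_ops).
 */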
static const struct rte_flow_ops enic_vf_flow_ops = {
	.validate = enic_vf_flow_validate,
	.create = enic_vf_flow_create,
	.destroy = enic_vf_flow_destroy,
	.flush = enic_vf_flow_flush,
	.query = enic_vf_flow_query,
};

static int
enic_vf_filter_ctrl(struct rte_eth_dev *eth_dev,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct enic_vf_representor *vf;
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		if (vf->enic.flow_filter_mode == FILTER_FLOWMAN) {
			*(const void **)arg = &enic_vf_flow_ops;
		} else {
			ENICPMD_LOG(WARNING, "VF representors require flowman support for rte_flow API");
			ret = -EINVAL;
		}
		break;
	default:
		ENICPMD_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int enic_vf_link_update(struct rte_eth_dev *eth_dev,
	int wait_to_complete __rte_unused)
{
	struct enic_vf_representor *vf;
	struct rte_eth_link link;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	/*
	 * Link status and speed are same as PF. Update PF status and then
	 * copy it to the VF representor.
	 */
	enic_link_update(pf->rte_dev);
	rte_eth_linkstatus_get(pf->rte_dev, &link);
	rte_eth_linkstatus_set(eth_dev, &link);
	return 0;
}

static int enic_vf_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic_vf_representor *vf;
	struct vnic_stats *vs;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Get VF stats via PF */
	err = vnic_dev_stats_dump(vf->enic.vdev, &vs);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting stats\n");
		return err;
	}
	stats->ipackets = vs->rx.rx_frames_ok;
	stats->opackets = vs->tx.tx_frames_ok;
	stats->ibytes = vs->rx.rx_bytes_ok;
	stats->obytes = vs->tx.tx_bytes_ok;
	stats->ierrors = vs->rx.rx_errors + vs->rx.rx_drop;
	stats->oerrors = vs->tx.tx_errors;
	stats->imissed = vs->rx.rx_no_bufs;
	return 0;
}

static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;
	int err;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	/* Ask PF to clear VF stats */
	err = vnic_dev_stats_clear(vf->enic.vdev);
	if (err) {
		ENICPMD_LOG(ERR, "error in clearing stats\n");
		return err;
	}
	return 0;
}

static int enic_vf_dev_infos_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic_vf_representor *vf;
	struct enic *pf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	pf = vf->pf;
	device_info->max_rx_queues = eth_dev->data->nb_rx_queues;
	device_info->max_tx_queues = eth_dev->data->nb_tx_queues;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* Max packet size is same as PF */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(pf->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	/* No offload capa, RSS, etc. until Tx/Rx handlers are added */
	device_info->rx_offload_capa = 0;
	device_info->tx_offload_capa = 0;
	device_info->switch_info.name = pf->rte_dev->device->name;
	device_info->switch_info.domain_id = vf->switch_domain_id;
	device_info->switch_info.port_id = vf->vf_id;
	return 0;
}

static void set_vf_packet_filter(struct enic_vf_representor *vf)
{
	/* switchdev: packet filters are ignored */
	if (vf->enic.switchdev_mode)
		return;
	/* Ask PF to apply filters on VF */
	vnic_dev_packet_filter(vf->enic.vdev, 1 /* unicast */, 1 /* mcast */,
		1 /* bcast */, vf->promisc, vf->allmulti);
}

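/*
 * Promiscuous/allmulticast handlers only record the requested state in the
 * representor and re-apply the VF packet filter through the proxy devcmd.
 */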
static int enic_vf_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->promisc = 0;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 1;
	set_vf_packet_filter(vf);
	return 0;
}

static int enic_vf_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	vf = eth_dev->data->dev_private;
	vf->allmulti = 0;
	set_vf_packet_filter(vf);
	return 0;
}

/*
 * A minimal set of handlers.
 * The representor can get/set a small set of VF settings via "proxy" devcmd.
 * With proxy devcmd, the PF driver basically tells the VIC firmware to
 * "perform this devcmd on that VF".
 */
static const struct eth_dev_ops enic_vf_representor_dev_ops = {
	.allmulticast_enable = enic_vf_allmulticast_enable,
	.allmulticast_disable = enic_vf_allmulticast_disable,
	.dev_configure = enic_vf_dev_configure,
	.dev_infos_get = enic_vf_dev_infos_get,
	.dev_start = enic_vf_dev_start,
	.dev_stop = enic_vf_dev_stop,
	.dev_close = enic_vf_dev_close,
	.filter_ctrl = enic_vf_filter_ctrl,
	.link_update = enic_vf_link_update,
	.promiscuous_enable = enic_vf_promiscuous_enable,
	.promiscuous_disable = enic_vf_promiscuous_disable,
	.stats_get = enic_vf_stats_get,
	.stats_reset = enic_vf_stats_reset,
	.rx_queue_setup = enic_vf_dev_rx_queue_setup,
	.rx_queue_release = enic_vf_dev_rx_queue_release,
	.tx_queue_setup = enic_vf_dev_tx_queue_setup,
	.tx_queue_release = enic_vf_dev_tx_queue_release,
};

static int get_vf_config(struct enic_vf_representor *vf)
{
	struct vnic_enet_config *c;
	struct enic *pf;
	int switch_mtu;
	int err;

	c = &vf->config;
	pf = vf->pf;
	/* VF MAC */
	err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MAC address\n");
		return err;
	}
	rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs);
	/* VF MTU per its vNIC setting */
	err = vnic_dev_spec(vf->enic.vdev,
			    offsetof(struct vnic_enet_config, mtu),
			    sizeof(c->mtu), &c->mtu);
	if (err) {
		ENICPMD_LOG(ERR, "error in getting MTU\n");
		return err;
	}
	/*
	 * Blade switch (fabric interconnect) port's MTU. Assume the kernel
	 * enic driver runs on VF. That driver automatically adjusts its MTU
	 * according to the switch MTU.
	 */
	switch_mtu = vnic_dev_mtu(pf->vdev);
	vf->eth_dev->data->mtu = c->mtu;
	if (switch_mtu > c->mtu)
		vf->eth_dev->data->mtu = RTE_MIN(ENIC_MAX_MTU, switch_mtu);
	return 0;
}

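/*
 * Initialize one VF representor eth_dev. init_params is an
 * enic_vf_representor template carrying the switch domain id and VF id,
 * passed in by the PF probe path when it creates the representor ports.
 */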
int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
{
	struct enic_vf_representor *vf, *params;
	struct rte_pci_device *pdev;
	struct enic *pf, *vf_enic;
	struct rte_pci_addr *addr;
	int ret;

	ENICPMD_FUNC_TRACE();
	params = init_params;
	vf = eth_dev->data->dev_private;
	vf->switch_domain_id = params->switch_domain_id;
	vf->vf_id = params->vf_id;
	vf->eth_dev = eth_dev;
	vf->pf = params->pf;
	vf->allmulti = 1;
	vf->promisc = 0;
	pf = vf->pf;
	vf->enic.switchdev_mode = pf->switchdev_mode;
	/* Only switchdev is supported now */
	RTE_ASSERT(vf->enic.switchdev_mode);
	/* Allocate WQ, RQ, CQ for the representor */
	vf->pf_wq_idx = vf_wq_idx(vf);
	vf->pf_wq_cq_idx = vf_wq_cq_idx(vf);
	vf->pf_rq_sop_idx = vf_rq_sop_idx(vf);
	vf->pf_rq_data_idx = vf_rq_data_idx(vf);
	/* Remove these assertions once queue allocation has an easy-to-use
	 * allocator API instead of index number calculations used throughout
	 * the driver.
	 */
	RTE_ASSERT(enic_cq_rq(pf, vf->pf_rq_sop_idx) == vf->pf_rq_sop_idx);
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(vf->pf_rq_sop_idx) ==
		   vf->pf_rq_sop_idx);
	/* RX handlers use enic_cq_rq(sop) to get CQ, so do not save it */
	pf->vf_required_wq++;
	pf->vf_required_rq += 2; /* sop and data */
	pf->vf_required_cq += 2; /* 1 for rq sop and 1 for wq */
	ENICPMD_LOG(DEBUG, "vf_id %u wq %u rq_sop %u rq_data %u wq_cq %u rq_cq %u",
		vf->vf_id, vf->pf_wq_idx, vf->pf_rq_sop_idx, vf->pf_rq_data_idx,
		vf->pf_wq_cq_idx, enic_cq_rq(pf, vf->pf_rq_sop_idx));
	if (enic_cq_rq(pf, vf->pf_rq_sop_idx) >= pf->conf_cq_count) {
		ENICPMD_LOG(ERR, "Insufficient CQs. Please ensure number of CQs (%u)"
			    " >= number of RQs (%u) in CIMC or UCSM",
			    pf->conf_cq_count, pf->conf_rq_count);
		return -EINVAL;
	}

	/* Check for non-existent VFs */
	pdev = RTE_ETH_DEV_TO_PCI(pf->rte_dev);
	if (vf->vf_id >= pdev->max_vfs) {
		ENICPMD_LOG(ERR, "VF ID is invalid. vf_id %u max_vfs %u",
			    vf->vf_id, pdev->max_vfs);
		return -ENODEV;
	}

	eth_dev->device->driver = pf->rte_dev->device->driver;
	eth_dev->dev_ops = &enic_vf_representor_dev_ops;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
	eth_dev->data->representor_id = vf->vf_id;
	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
		sizeof(struct rte_ether_addr) *
		ENIC_UNICAST_PERFECT_FILTERS, 0);
	if (eth_dev->data->mac_addrs == NULL)
		return -ENOMEM;
	/* Use 1 RX queue and 1 TX queue for representor path */
	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;
	eth_dev->rx_pkt_burst = &enic_vf_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_vf_xmit_pkts;
	/* Initial link state copied from PF */
	eth_dev->data->dev_link = pf->rte_dev->data->dev_link;
	/* Representor vdev to perform devcmd */
	vf->enic.vdev = vnic_vf_rep_register(&vf->enic, pf->vdev, vf->vf_id);
	if (vf->enic.vdev == NULL)
		return -ENOMEM;
	ret = vnic_dev_alloc_stats_mem(vf->enic.vdev);
	if (ret)
		return ret;
	/* Get/copy VF vNIC MAC, MTU, etc. into eth_dev */
	ret = get_vf_config(vf);
	if (ret)
		return ret;

	/*
	 * Calculate VF BDF. The firmware ensures that PF BDF is always
	 * bus:dev.0, and VF BDFs are dev.1, dev.2, and so on.
	 */
	vf->bdf = pdev->addr;
	vf->bdf.function += vf->vf_id + 1;

	/* Copy a few fields used by enic_fm_flow */
	vf_enic = &vf->enic;
	vf_enic->switch_domain_id = vf->switch_domain_id;
	vf_enic->flow_filter_mode = pf->flow_filter_mode;
	vf_enic->rte_dev = eth_dev;
	vf_enic->dev_data = eth_dev->data;
	LIST_INIT(&vf_enic->flows);
	LIST_INIT(&vf_enic->memzone_list);
	rte_spinlock_init(&vf_enic->memzone_list_lock);
	addr = &vf->bdf;
	snprintf(vf_enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);
	return 0;
}

int enic_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic_vf_representor *vf;

	ENICPMD_FUNC_TRACE();
	vf = eth_dev->data->dev_private;
	vnic_dev_unregister(vf->enic.vdev);
	return 0;
}