net/nfp: add NFDk option and queue function
dpdk.git: drivers/net/nfp/nfp_ethdev_vf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2014-2021 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_ethdev_vf.c
 *
 * Netronome vNIC VF DPDK Poll-Mode Driver: Main entry point
 */

#include <rte_alarm.h>

#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"

#include "nfp_common.h"
#include "nfp_rxtx.h"
#include "nfp_logs.h"
#include "nfp_ctrl.h"

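/*
 * Read the VF MAC address from the vNIC config BAR: two big-endian
 * 32-bit words hold the six address bytes (first four, then two).
 */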
static void
nfp_netvf_read_mac(struct nfp_net_hw *hw)
{
        uint32_t tmp;

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
        memcpy(&hw->mac_addr[0], &tmp, 4);

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
        memcpy(&hw->mac_addr[4], &tmp, 2);
}

static int
nfp_netvf_start(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "Start");

        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);

        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);

        /* Check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_UIO) {
                        /*
                         * Better not to share LSC with RX interrupts.
                         * Unregistering LSC interrupt handler.
                         */
                        rte_intr_callback_unregister(pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)dev);

                        if (dev->data->nb_rx_queues > 1) {
                                PMD_INIT_LOG(ERR, "PMD rx interrupt only supports 1 queue with UIO");
                                return -EIO;
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;

                nfp_configure_rx_interrupt(dev, intr_handle);
                update = NFP_NET_CFG_UPDATE_MSIX;
        }

        rte_intr_enable(intr_handle);

        new_ctrl = nfp_check_offloads(dev);

        /* Writing configuration parameters in the device */
        nfp_net_params_setup(hw);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;

        if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }

        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return -EIO;

        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before.
         */
        if (nfp_net_rx_freelist_setup(dev) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        hw->ctrl = new_ctrl;

        return 0;

error:
        /*
         * An error returned by this function means the application will
         * exit, and the system will then release all the allocated memory,
         * including memory coming from hugepages.
         *
         * The device could be enabled at this point with some queues
         * ready for getting packets. This is true if the call to
         * nfp_net_rx_freelist_setup() succeeds for some queues but
         * fails for subsequent queues.
         *
         * The application should exit, but better to tell the device first.
         */
        nfp_net_disable_queues(dev);

        return ret;
}

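/* Stop device: disable the RX and TX queues, then reset them */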
static int
nfp_netvf_stop(struct rte_eth_dev *dev)
{
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        PMD_INIT_LOG(DEBUG, "Stop");

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
        }

        return 0;
}

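/* Set the link up. */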
static int
nfp_netvf_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
        return -ENOTSUP;
}

/* Set the link down. */
static int
nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
        return -ENOTSUP;
}

/* Reset and stop device. The device cannot be restarted. */
static int
nfp_netvf_close(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_txq *this_tx_q;
        struct nfp_net_rxq *this_rx_q;
        int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        PMD_INIT_LOG(DEBUG, "Close");

        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        /*
         * We assume that the DPDK application is stopping all the
         * threads/queues before calling the device close function.
         */

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
                nfp_net_reset_tx_queue(this_tx_q);
                nfp_net_tx_queue_release(dev, i);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
                nfp_net_reset_rx_queue(this_rx_q);
                nfp_net_rx_queue_release(dev, i);
        }

        rte_intr_disable(pci_dev->intr_handle);

        /* Unregister callback func from EAL lib */
        rte_intr_callback_unregister(pci_dev->intr_handle,
                                     nfp_net_dev_interrupt_handler,
                                     (void *)dev);

        /* Cancel possible impending LSC work here before releasing the port */
        rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler,
                             (void *)dev);

        /*
         * The ixgbe PMD disables the PCIe master on the device;
         * the i40e PMD does not...
         */

        return 0;
}

/* DPDK ethdev ops mounted on VF ports using the NFD3 data path */
static const struct eth_dev_ops nfp_netvf_nfd3_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_netvf_start,
        .dev_stop               = nfp_netvf_stop,
        .dev_set_link_up        = nfp_netvf_set_link_up,
        .dev_set_link_down      = nfp_netvf_set_link_down,
        .dev_close              = nfp_netvf_close,
        .promiscuous_enable     = nfp_net_promisc_enable,
        .promiscuous_disable    = nfp_net_promisc_disable,
        .link_update            = nfp_net_link_update,
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .mac_addr_set           = nfp_net_set_mac_addr,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
        .reta_query             = nfp_net_reta_query,
        .rss_hash_update        = nfp_net_rss_hash_update,
        .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .tx_queue_setup         = nfp_net_nfd3_tx_queue_setup,
        .tx_queue_release       = nfp_net_tx_queue_release,
        .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};

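/* The NFDK ops differ from NFD3 only in the Tx queue setup hook */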
static const struct eth_dev_ops nfp_netvf_nfdk_eth_dev_ops = {
        .dev_configure          = nfp_net_configure,
        .dev_start              = nfp_netvf_start,
        .dev_stop               = nfp_netvf_stop,
        .dev_set_link_up        = nfp_netvf_set_link_up,
        .dev_set_link_down      = nfp_netvf_set_link_down,
        .dev_close              = nfp_netvf_close,
        .promiscuous_enable     = nfp_net_promisc_enable,
        .promiscuous_disable    = nfp_net_promisc_disable,
        .link_update            = nfp_net_link_update,
        .stats_get              = nfp_net_stats_get,
        .stats_reset            = nfp_net_stats_reset,
        .dev_infos_get          = nfp_net_infos_get,
        .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
        .mtu_set                = nfp_net_dev_mtu_set,
        .mac_addr_set           = nfp_net_set_mac_addr,
        .vlan_offload_set       = nfp_net_vlan_offload_set,
        .reta_update            = nfp_net_reta_update,
        .reta_query             = nfp_net_reta_query,
        .rss_hash_update        = nfp_net_rss_hash_update,
        .rss_hash_conf_get      = nfp_net_rss_hash_conf_get,
        .rx_queue_setup         = nfp_net_rx_queue_setup,
        .rx_queue_release       = nfp_net_rx_queue_release,
        .tx_queue_setup         = nfp_net_nfdk_tx_queue_setup,
        .tx_queue_release       = nfp_net_tx_queue_release,
        .rx_queue_intr_enable   = nfp_rx_queue_intr_enable,
        .rx_queue_intr_disable  = nfp_rx_queue_intr_disable,
};

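/*
 * Mount the ethdev ops and burst handlers matching the data path
 * (NFD3 or NFDK) advertised by the firmware in the config BAR
 * version word.
 */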
static inline int
nfp_netvf_ethdev_ops_mount(struct nfp_net_hw *hw, struct rte_eth_dev *eth_dev)
{
        switch (NFD_CFG_CLASS_VER_of(hw->ver)) {
        case NFP_NET_CFG_VERSION_DP_NFD3:
                eth_dev->dev_ops = &nfp_netvf_nfd3_eth_dev_ops;
                eth_dev->tx_pkt_burst = &nfp_net_nfd3_xmit_pkts;
                break;
        case NFP_NET_CFG_VERSION_DP_NFDK:
                if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 5) {
                        PMD_DRV_LOG(ERR, "NFDK must use ABI 5 or newer, found: %d",
                                NFD_CFG_MAJOR_VERSION_of(hw->ver));
                        return -EINVAL;
                }
                eth_dev->dev_ops = &nfp_netvf_nfdk_eth_dev_ops;
                /* Each data path needs its own Tx burst function; the NFDK
                 * one (nfp_net_nfdk_xmit_pkts) is assumed to be provided by
                 * the NFDK Tx path. */
                eth_dev->tx_pkt_burst = &nfp_net_nfdk_xmit_pkts;
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported firmware data path version");
                return -EINVAL;
        }

        eth_dev->rx_queue_count = nfp_net_rx_queue_count;
        eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;

        return 0;
}

static int
nfp_netvf_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        struct rte_ether_addr *tmp_ether_addr;

        uint64_t tx_bar_off = 0, rx_bar_off = 0;
        uint32_t start_q;
        int stride = 4;
        int port = 0;
        int err;

        PMD_INIT_FUNC_TRACE();

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

        /* NFP cannot handle DMA addresses requiring more than 40 bits */
        if (rte_mem_check_dma_mask(40)) {
                RTE_LOG(ERR, PMD,
                        "device %s cannot be used: DMA mask restricted to 40 bits!\n",
                        pci_dev->device.name);
                return -ENODEV;
        }

        hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
        if (hw->ctrl_bar == NULL) {
                PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
                return -ENODEV;
        }

        PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

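        /* The firmware version word decides which data path ops to mount */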
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);

        if (nfp_netvf_ethdev_ops_mount(hw, eth_dev))
                return -EINVAL;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

        PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                     pci_dev->id.vendor_id, pci_dev->id.device_id,
                     pci_dev->addr.domain, pci_dev->addr.bus,
                     pci_dev->addr.devid, pci_dev->addr.function);

        hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
        hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

        /* Work out where in the BAR the queues start. */
        switch (pci_dev->id.device_id) {
        case PCI_DEVICE_ID_NFP3800_VF_NIC:
        case PCI_DEVICE_ID_NFP6000_VF_NIC:
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
                tx_bar_off = nfp_pci_queue(pci_dev, start_q);
                start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
                rx_bar_off = nfp_pci_queue(pci_dev, start_q);
                break;
        default:
                PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
                err = -ENODEV;
                goto dev_err_ctrl_map;
        }

        PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
        PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

        hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
        hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

        PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

        nfp_net_cfg_queue_setup(hw);

        /* Get some of the read-only fields from the config BAR */
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
        hw->mtu = RTE_ETHER_MTU;

        /* VLAN insertion is incompatible with LSOv2 */
        if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
                hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

        PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
                     NFD_CFG_MAJOR_VERSION_of(hw->ver),
                     NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

        PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC   ? "PROMISC "   : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2BC      ? "L2BCFILT "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_L2MC      ? "L2MCFILT "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXCSUM    ? "RXCSUM "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXCSUM    ? "TXCSUM "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXVLAN    ? "RXVLAN "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXVLAN    ? "TXVLAN "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_SCATTER   ? "SCATTER "   : "",
                     hw->cap & NFP_NET_CFG_CTRL_GATHER    ? "GATHER "    : "",
                     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO       ? "TSO "       : "",
                     hw->cap & NFP_NET_CFG_CTRL_LSO2      ? "TSOv2 "     : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS       ? "RSS "       : "",
                     hw->cap & NFP_NET_CFG_CTRL_RSS2      ? "RSSv2 "     : "");

        hw->ctrl = 0;

        hw->stride_rx = stride;
        hw->stride_tx = stride;

        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                     hw->max_rx_queues, hw->max_tx_queues);

        /* Initializing spinlock for reconfigs */
        rte_spinlock_init(&hw->reconfig_lock);

        /* Allocating memory for MAC addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
                                               RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
                err = -ENOMEM;
                goto dev_err_queues_map;
        }

        nfp_netvf_read_mac(hw);

        tmp_ether_addr = (struct rte_ether_addr *)&hw->mac_addr;
        if (!rte_is_valid_assigned_ether_addr(tmp_ether_addr)) {
                PMD_INIT_LOG(INFO, "Using random mac address for port %d",
                             port);
                /* Using random mac addresses for VFs */
                rte_eth_random_addr(&hw->mac_addr[0]);
                nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
        }

        /* Copying mac address to DPDK eth_dev struct */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
                            &eth_dev->data->mac_addrs[0]);

        if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id,
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(pci_dev->intr_handle,
                                           nfp_net_dev_interrupt_handler,
                                           (void *)eth_dev);
                /* Telling the firmware about the LSC interrupt entry */
                nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
                /* Recording current stats counters values */
                nfp_net_stats_reset(eth_dev);
        }

        return 0;

dev_err_queues_map:
        nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
        nfp_cpp_area_free(hw->ctrl_area);

        return err;
}

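/* PCI device IDs of the NFP VFs handled by this PMD */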
static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                               PCI_DEVICE_ID_NFP3800_VF_NIC)
        },
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
                               PCI_DEVICE_ID_NFP6000_VF_NIC)
        },
        {
                .vendor_id = 0,
        },
};

static int
nfp_vf_pci_uninit(struct rte_eth_dev *eth_dev)
{
        /* VF cleanup, just free private port data */
        return nfp_netvf_close(eth_dev);
}

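/* Probe: create an ethdev with NFP adapter private data and init it */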
static int
eth_nfp_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct nfp_net_adapter), nfp_netvf_init);
}

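/* Remove: tear down the ethdev created at probe time */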
static int
eth_nfp_vf_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, nfp_vf_pci_uninit);
}

static struct rte_pci_driver rte_nfp_net_vf_pmd = {
        .id_table = pci_id_nfp_vf_net_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_nfp_vf_pci_probe,
        .remove = eth_nfp_vf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */