net/ngbe: support Rx queue setup/release
[dpdk.git] drivers/net/ngbe/ngbe_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <rte_common.h>
#include <ethdev_pci.h>

#include <rte_alarm.h>

#include "ngbe_logs.h"
#include "ngbe.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"

static int ngbe_dev_close(struct rte_eth_dev *dev);

static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
        { .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = NGBE_RING_DESC_MAX,
        .nb_min = NGBE_RING_DESC_MIN,
        .nb_align = NGBE_RXD_ALIGN,
};

static const struct eth_dev_ops ngbe_eth_dev_ops;

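/*
 * Enable interrupts: program the misc interrupt enable mask (NGBE_IENMISC)
 * and clear the vector 0 interrupt mask bits via NGBE_IMC(0), so the
 * interrupts selected in intr->mask_misc and intr->mask are unmasked.
 */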
static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        wr32(hw, NGBE_IENMISC, intr->mask_misc);
        wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
        ngbe_flush(hw);
}

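/* Mask (disable) all interrupts by setting every bit in NGBE_IMS(0) */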
static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
        PMD_INIT_FUNC_TRACE();

        wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
        ngbe_flush(hw);
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
        uint16_t mask;

        /*
         * These locks are trickier since they are common to all ports; but
         * swfw_sync retries for long enough (1s) that if the lock cannot be
         * taken, it is almost certainly due to an improperly held semaphore.
         */
        mask = NGBE_MNGSEM_SWPHY |
               NGBE_MNGSEM_SWMBX |
               NGBE_MNGSEM_SWFLASH;
        if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
                PMD_DRV_LOG(DEBUG, "SWFW common locks released");

        hw->mac.release_swfw_sync(hw, mask);
}

static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        int err;

        PMD_INIT_FUNC_TRACE();

        eth_dev->dev_ops = &ngbe_eth_dev_ops;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->sub_system_id = pci_dev->id.subsystem_device_id;
        ngbe_map_device_id(hw);
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

        /* Reserve memory for interrupt status block */
        mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
                NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
        if (mz == NULL)
                return -ENOMEM;

        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);

        /* Initialize the shared code (base driver) */
        err = ngbe_init_shared_code(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
                return -EIO;
        }

        /* Unlock any pending hardware semaphore */
        ngbe_swfw_lock_reset(hw);

        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
                return -EIO;
        }

        /* Make sure we have a good EEPROM before we read from it */
        err = hw->rom.validate_checksum(hw, NULL);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
                return -EIO;
        }

        err = hw->mac.init_hw(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
                return -EIO;
        }

        /* disable interrupt */
        ngbe_disable_intr(hw);

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
                                               hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %u bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
                        RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC addresses",
                             RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
                return -ENOMEM;
        }

        ctrl_ext = rd32(hw, NGBE_PORTCTL);
        /* let hardware know driver is loaded */
        ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
        ctrl_ext |= NGBE_PORTCTL_RSTDONE;
        wr32(hw, NGBE_PORTCTL, ctrl_ext);
        ngbe_flush(hw);

        rte_intr_callback_register(intr_handle,
                                   ngbe_dev_interrupt_handler, eth_dev);

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(intr_handle);

        /* enable supported interrupts */
        ngbe_enable_intr(eth_dev);

        return 0;
}

static int
eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ngbe_dev_close(eth_dev);

        return -EINVAL;
}

static int
eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct ngbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_ngbe_dev_init, NULL);
}

static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *ethdev;

        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (ethdev == NULL)
                return 0;

        return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
}

static struct rte_pci_driver rte_ngbe_pmd = {
        .id_table = pci_id_ngbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = eth_ngbe_pci_probe,
        .remove = eth_ngbe_pci_remove,
};

static int
ngbe_dev_configure(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);

        PMD_INIT_FUNC_TRACE();

        /* set flag to update link status after init */
        intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        /*
         * Initialize to TRUE. If any Rx queue fails to meet the bulk
         * allocation preconditions, this flag will be reset.
         */
        adapter->rx_bulk_alloc_allowed = true;

        return 0;
}

/*
 * Reset and stop device.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        return -EINVAL;
}

static int
ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = NGBE_DEFAULT_RX_PTHRESH,
                        .hthresh = NGBE_DEFAULT_RX_HTHRESH,
                        .wthresh = NGBE_DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
                .offloads = 0,
        };

        dev_info->rx_desc_lim = rx_desc_lim;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
                                ETH_LINK_SPEED_10M;

        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = 256;

        return 0;
}

/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
                            int wait_to_complete)
{
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct rte_eth_link link;
        u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
        u32 lan_speed = 0;
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        bool link_up;
        int err;
        int wait = 1;

        memset(&link, 0, sizeof(link));
        link.link_status = ETH_LINK_DOWN;
        link.link_speed = ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_HALF_DUPLEX;
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                        ~ETH_LINK_SPEED_AUTONEG);

        hw->mac.get_link_status = true;

        if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
                return rte_eth_linkstatus_set(dev, &link);

        /*
         * Do not wait for the link check to complete if waiting was not
         * requested or if the LSC interrupt is enabled.
         */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                wait = 0;

        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
        if (err != 0) {
                link.link_speed = ETH_SPEED_NUM_NONE;
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }

        if (!link_up)
                return rte_eth_linkstatus_set(dev, &link);

        intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
        link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (link_speed) {
        default:
        case NGBE_LINK_SPEED_UNKNOWN:
                link.link_speed = ETH_SPEED_NUM_NONE;
                break;

        case NGBE_LINK_SPEED_10M_FULL:
                link.link_speed = ETH_SPEED_NUM_10M;
                lan_speed = 0;
                break;

        case NGBE_LINK_SPEED_100M_FULL:
                link.link_speed = ETH_SPEED_NUM_100M;
                lan_speed = 1;
                break;

        case NGBE_LINK_SPEED_1GB_FULL:
                link.link_speed = ETH_SPEED_NUM_1G;
                lan_speed = 2;
                break;
        }

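        /*
         * On the PF, reflect the negotiated speed in the LAN speed field
         * (0 = 10M, 1 = 100M, 2 = 1G) and update the MAC Tx configuration.
         */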
        if (hw->is_pf) {
                wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
                if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
                                NGBE_LINK_SPEED_100M_FULL |
                                NGBE_LINK_SPEED_10M_FULL)) {
                        wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
                                NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
                }
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        return ngbe_dev_link_update_share(dev, wait_to_complete);
}

/*
 * It reads the ICR and sets a flag for the link update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
        uint32_t eicr;
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

        /* clear all cause mask */
        ngbe_disable_intr(hw);

        /* read-on-clear nic registers here */
        eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
        PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

        intr->flags = 0;

        /* set flag for async link update */
        if (eicr & NGBE_ICRMISC_PHY)
                intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        if (eicr & NGBE_ICRMISC_VFMBX)
                intr->flags |= NGBE_FLAG_MAILBOX;

        if (eicr & NGBE_ICRMISC_LNKSEC)
                intr->flags |= NGBE_FLAG_MACSEC;

        if (eicr & NGBE_ICRMISC_GPIO)
                intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

        return 0;
}

/**
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);

        if (link.link_status == ETH_LINK_UP) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, "Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
        PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
                                pci_dev->addr.domain,
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
}

/*
 * It executes link_update after an interrupt has occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        int64_t timeout;

        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

        if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;

                /* get the link status before the update, to predict the change */
                rte_eth_linkstatus_get(dev, &link);

                ngbe_dev_link_update(dev, 0);

                /* link is likely coming up */
                if (link.link_status != ETH_LINK_UP)
                        /* handle it 1 sec later, waiting for it to stabilize */
                        timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
                /* link is likely going down */
                else
                        /* handle it 4 sec later, waiting for it to stabilize */
                        timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

                ngbe_dev_link_status_print(dev);
                if (rte_eal_alarm_set(timeout * 1000,
                                      ngbe_dev_interrupt_delayed_handler,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
                        /* remember the original mask */
                        intr->mask_misc_orig = intr->mask_misc;
                        /* disable only the LSC interrupt */
                        intr->mask_misc &= ~NGBE_ICRMISC_PHY;

                        intr->mask_orig = intr->mask;
                        /* disable only the misc interrupt vector */
                        intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
                }
        }

        PMD_DRV_LOG(DEBUG, "enable intr immediately");
        ngbe_enable_intr(dev);

        return 0;
}

/**
 * Interrupt handler registered as an alarm callback for delayed handling of a
 * specific interrupt, waiting for the NIC state to become stable. Since the
 * ngbe interrupt state is not stable right after the link goes down, it needs
 * to wait 4 seconds before reading a stable status.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
        struct ngbe_hw *hw = ngbe_dev_hw(dev);
        uint32_t eicr;

        ngbe_disable_intr(hw);

        eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];

        if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
                ngbe_dev_link_update(dev, 0);
                intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
                ngbe_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }

        if (intr->flags & NGBE_FLAG_MACSEC) {
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
                                              NULL);
                intr->flags &= ~NGBE_FLAG_MACSEC;
        }

        /* restore original mask */
        intr->mask_misc = intr->mask_misc_orig;
        intr->mask_misc_orig = 0;
        intr->mask = intr->mask_orig;
        intr->mask_orig = 0;

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ngbe_enable_intr(dev);
}

/**
 * Interrupt handler triggered by the NIC for handling a specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        ngbe_dev_interrupt_get_status(dev);
        ngbe_dev_interrupt_action(dev);
}

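/*
 * Device operations exported to the ethdev layer. The Rx queue setup and
 * release handlers are implemented in ngbe_rxtx.c (declared in ngbe_rxtx.h).
 */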
static const struct eth_dev_ops ngbe_eth_dev_ops = {
        .dev_configure              = ngbe_dev_configure,
        .dev_infos_get              = ngbe_dev_info_get,
        .link_update                = ngbe_dev_link_update,
        .rx_queue_setup             = ngbe_dev_rx_queue_setup,
        .rx_queue_release           = ngbe_dev_rx_queue_release,
};

RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
        RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif