net/enetc: enable Rx and Tx
drivers/net/enetc/enetc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

/* Function prototypes */
static int enetc_dev_configure(struct rte_eth_dev *dev);
static int enetc_dev_start(struct rte_eth_dev *dev);
static void enetc_dev_stop(struct rte_eth_dev *dev);
static void enetc_dev_close(struct rte_eth_dev *dev);
static void enetc_dev_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int enetc_hardware_init(struct enetc_eth_hw *hw);
static int enetc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
static void enetc_rx_queue_release(void *rxq);
static int enetc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
static void enetc_tx_queue_release(void *txq);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .dev_infos_get        = enetc_dev_infos_get,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_release     = enetc_tx_queue_release,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieve and store the HW base address of the device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

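/* Free the MAC address array allocated in enetc_dev_init() */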
static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();
        rte_free(eth_dev->data->mac_addrs);

        return 0;
}

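/* No device-level configuration is needed here; queues are set up separately */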
static int
enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

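/* Enable MAC Tx/Rx and then enable the port */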
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val | ENETC_PMR_EN);

        return 0;
}

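/* Disable the port, then disable MAC Tx/Rx */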
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val & (~ENETC_PMR_EN));

        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

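/* Stop the device and release all Rx and Tx queues */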
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                                ENETC_PM0_STATUS));

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

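/*
 * One-time hardware setup: derive the port and global register bases from
 * the mapped BAR, enable the Station Interface and accept broadcast traffic.
 */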
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        uint32_t psipmr = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculate and store the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enable the Station Interface */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.reg, ENETC_SIMR),
                                          ENETC_SIMR_EN);

        /* Accept broadcast packets on each interface */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0) |
                  ENETC_PSIPMR_SET_VLAN_MP(0);
        psipmr |= ENETC_PSIPMR_SET_UP(1) | ENETC_PSIPMR_SET_MP(1) |
                  ENETC_PSIPMR_SET_VLAN_MP(1);
        psipmr |= ENETC_PSIPMR_SET_UP(2) | ENETC_PSIPMR_SET_MP(2) |
                  ENETC_PSIPMR_SET_VLAN_MP(2);

        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMR),
                        psipmr);

        /* Enable the broadcast address */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR0(0)),
                        0xFFFFFFFF);
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR1(0)),
                        0xFFFF << 16);

        return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = 1500;
}

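/* Allocate the software and hardware Tx buffer descriptor rings */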
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

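/* Program the Tx BD ring registers (base address, length) and enable the ring */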
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        uintptr_t base_addr;
        uint32_t tbmr;

        base_addr = (uintptr_t)tx_ring->bd_base;
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)base_addr));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)base_addr));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        tbmr = ENETC_TBMR_EN;
        /* enable ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_alloc_tx_resources(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc)
{
        int err;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);

        return err;
}

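/* Disable the Tx ring, free any mbufs still held in the software ring and release the ring memory */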
static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

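/* Allocate the software and hardware Rx buffer descriptor rings */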
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

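/* Program the Rx BD ring registers, enable the ring, pre-fill it with mbufs and set the buffer size */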
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uintptr_t base_addr;
        uint16_t buf_size;

        base_addr = (uintptr_t)rx_ring->bd_base;
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)base_addr));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)base_addr));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        /* enable ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
}

static int
enetc_alloc_rx_resources(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         struct rte_mempool *mb_pool)
{
        int err;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mb_pool)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_rx_resources(dev, rx_queue_id,
                                       nb_rx_desc,
                                       mb_pool);

        return err;
}

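/* Disable the Rx ring, free the mbufs still posted to it and release the ring memory */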
static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

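/* Attach to the PCI device: allocate the per-port private data and run enetc_dev_init() */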
static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}