net/enetc: enable promiscuous and allmulticast
[dpdk.git] drivers/net/enetc/enetc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

static int
enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

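/*
 * Bring the port up: enable MAC Tx/Rx in PM0_CMD_CFG, set the port
 * enable bit in PMR and, depending on the interface mode reported by
 * the hardware, select RGMII auto-speed or XGMII mode on PM0/PM1.
 */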
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

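/*
 * Reverse of enetc_dev_start(): clear the port enable bit, then the
 * MAC Tx/Rx enable bits.
 */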
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

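/* Packet types that the Rx burst function may report in mbuf->packet_type. */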
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

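/*
 * One-time hardware setup: derive the port and global register block
 * addresses from the mapped BAR, enable the station interface and read
 * back its primary MAC address from PSIPMAR0/PSIPMAR1.
 */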
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enabling Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

        *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
        mac++;
        *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));

        return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = 1500;
}

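/*
 * Allocate the software bookkeeping ring and the hardware Tx BD ring
 * for one queue; both are sized by nb_desc and cache-line aligned.
 */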
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

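/*
 * Program one Tx BD ring into hardware: write the ring IOVA (split over
 * TBBAR0/TBBAR1) and its length, enable the ring through TBMR and cache
 * pointers to the consumer index registers for later use by the Tx path.
 */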
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        uint32_t tbmr;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        tbmr = ENETC_TBMR_EN;
        /* enable ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_alloc_tx_resources(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc)
{
        int err;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);

        return err;
}

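/*
 * Disable the Tx BD ring, free any mbufs still referenced by the
 * software ring, then release the descriptor memory and the ring itself.
 */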
static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

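/*
 * Rx counterpart of enetc_alloc_txbdr(): allocate the software ring and
 * the hardware Rx BD ring for one queue.
 */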
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

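/*
 * Program one Rx BD ring: write its IOVA and length, attach the mbuf
 * pool, pre-fill the ring with buffers, set the buffer size from the
 * pool's data room and finally enable the ring.
 */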
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        /* enable ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_alloc_rx_resources(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         struct rte_mempool *mb_pool)
{
        int err;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mb_pool)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_rx_resources(dev, rx_queue_id,
                                       nb_rx_desc,
                                       mb_pool);

        return err;
}

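/*
 * Disable the Rx BD ring and free the buffers between next_to_clean and
 * next_to_use before releasing the descriptor memory and the ring.
 */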
static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received packets, good and bad. To count only good
         * received/transmitted frames, use the ENETC_PM0_RFRM and
         * ENETC_PM0_TFRM registers instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
         * truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

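/* Writing ENETC_CLEAR_STATS to the statistics configuration register requests a hardware reset of the port counters. */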
static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}

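/* Stop the port and release every Rx and Tx queue that was set up. */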
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

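/*
 * Promiscuous and all-multicast filtering are both controlled through
 * the ENETC_PSIPMR register: enabling promiscuous mode sets the unicast
 * and multicast promiscuous bits for station interface 0.
 */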
static void
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable promiscuous mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

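/*
 * Clear unicast promiscuous for SI0. The multicast promiscuous bit is
 * only cleared as well when all-multicast mode is not enabled.
 */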
static void
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable all-multicast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

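/*
 * While the port is promiscuous the multicast promiscuous bit must stay
 * set, so only clear it for SI0 when promiscuous mode is off.
 */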
static void
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all-multicast mode */

        /* Disable all-multicast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                               ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .promiscuous_enable   = enetc_promiscuous_enable,
        .promiscuous_disable  = enetc_promiscuous_disable,
        .allmulticast_enable  = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get        = enetc_dev_infos_get,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              ETHER_ADDR_LEN * 1);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

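/* Register the driver log type at constructor time; the default level is NOTICE. */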
RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}