net/enetc: add basic statistics
drivers/net/enetc/enetc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

/* Function prototypes */
static int enetc_dev_configure(struct rte_eth_dev *dev);
static int enetc_dev_start(struct rte_eth_dev *dev);
static void enetc_dev_stop(struct rte_eth_dev *dev);
static void enetc_dev_close(struct rte_eth_dev *dev);
static void enetc_dev_infos_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
static int enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int enetc_hardware_init(struct enetc_eth_hw *hw);
static int enetc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);
static void enetc_rx_queue_release(void *rxq);
static int enetc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);
static void enetc_tx_queue_release(void *txq);
static const uint32_t *enetc_supported_ptypes_get(struct rte_eth_dev *dev);
static int enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
static void enetc_stats_reset(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
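/* Both the ENETC PF (ENETC_DEV_ID) and VF (ENETC_DEV_ID_VF) are matched below. */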
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .dev_infos_get        = enetc_dev_infos_get,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieve and store the HW base address of the device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              ETHER_ADDR_LEN * 1);
                error = -ENOMEM;
                return error;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
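        /* Enable MAC Tx and Rx (ENETC_PM0_TX_EN/ENETC_PM0_RX_EN in
         * ENETC_PM0_CMD_CFG)
         */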
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(&hw->hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(&hw->hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(&hw->hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(&hw->hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(&hw->hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
                enetc_port_wr(&hw->hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PMR),
                        val & (~ENETC_PMR_EN));

        val = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                             ENETC_PM0_CMD_CFG));
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PM0_CMD_CFG),
                        val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = ENETC_REG_READ(ENETC_GET_HW_ADDR(hw->hw.port,
                                ENETC_PM0_STATUS));

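        /* Derive duplex, link state and speed from the PM0 status register */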
        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        uint32_t psipmr = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);
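        /* ENETC_PORT_BASE and ENETC_GLOBAL_BASE are fixed offsets into the
         * BAR mapped in enetc_dev_init()
         */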

        /* Enabling Station Interface */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.reg, ENETC_SIMR),
                                          ENETC_SIMR_EN);

        /* Accept broadcast packets on each station interface */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0) |
                  ENETC_PSIPMR_SET_VLAN_MP(0);
        psipmr |= ENETC_PSIPMR_SET_UP(1) | ENETC_PSIPMR_SET_MP(1) |
                  ENETC_PSIPMR_SET_VLAN_MP(1);
        psipmr |= ENETC_PSIPMR_SET_UP(2) | ENETC_PSIPMR_SET_MP(2) |
                  ENETC_PSIPMR_SET_VLAN_MP(2);

        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMR),
                        psipmr);

        /* Enabling broadcast address */
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR0(0)),
                        0xFFFFFFFF);
        ENETC_REG_WRITE(ENETC_GET_HW_ADDR(hw->hw.port, ENETC_PSIPMAR1(0)),
                        0xFFFF << 16);
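        /* All-ones in PSIPMAR0/PSIPMAR1 corresponds to the broadcast
         * address ff:ff:ff:ff:ff:ff
         */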

        return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = 1500;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        uint32_t tbmr;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
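        /* Program the Tx BD ring base IOVA (low/high 32-bit halves) and
         * the ring length
         */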
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        tbmr = ENETC_TBMR_EN;
        /* Enable the ring */
        enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
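        /* Cache the consumer index register addresses for later use by
         * the transmit path
         */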
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_alloc_tx_resources(struct rte_eth_dev *dev,
                         uint16_t queue_idx,
                         uint16_t nb_desc)
{
        int err;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_tx_resources(dev, queue_idx, nb_desc);

        return err;
}

static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
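        /* The buffer size advertised to hardware is the mbuf data room
         * minus the mbuf headroom
         */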
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        /* Enable the ring */
        enetc_rxbdr_wr(hw, idx, ENETC_RBMR, ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_alloc_rx_resources(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         struct rte_mempool *mb_pool)
{
        int err;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mb_pool)
{
        int err = 0;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        err = enetc_alloc_rx_resources(dev, rx_queue_id,
                                       nb_rx_desc,
                                       mb_pool);

        return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received/transmitted packets, good + bad. To count only
         * good frames, read the ENETC_PM0_RFRM and ENETC_PM0_TFRM
         * registers instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets. Read ENETC_PM0_RDRNTP instead to
         * exclude truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}
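
/*
 * These callbacks are reached through the generic ethdev statistics API;
 * an application would typically do something like:
 *
 *   struct rte_eth_stats stats;
 *
 *   rte_eth_stats_get(port_id, &stats);   (dispatches to enetc_stats_get)
 *   rte_eth_stats_reset(port_id);         (dispatches to enetc_stats_reset)
 */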

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}
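
/*
 * The probe/remove helpers above are standard ethdev PCI glue: probe
 * allocates an rte_eth_dev with an enetc_eth_adapter sized private area
 * and runs enetc_dev_init(); remove undoes it through enetc_dev_uninit().
 */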

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}