/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <inttypes.h>

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_io.h>

#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#define ENETFEC_NAME_PMD		net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

/* Extended buffer descriptor */
#define ENETFEC_EXTENDED_BD		0
#define NUM_OF_BD_QUEUES		6
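
/*
 * Note: the UIO-mapped buffer-descriptor region is carved into
 * NUM_OF_BD_QUEUES equal slices in pmd_enetfec_probe() below:
 * ENETFEC_MAX_Q Tx rings followed by ENETFEC_MAX_Q Rx rings.
 */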

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	if (fep->full_duplex == FULL_DUPLEX) {
		/* Full duplex: set FDEN in TCR */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* Half duplex: no receive on transmit */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}

	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* Align IP header to a 4-byte boundary */
		val |= ENETFEC_RACC_SHIFT16;
		if (fep->flag_csum & RX_FLAG_CSUM_EN)
			/* Discard frames with bad IP/protocol checksums */
			val |= ENETFEC_RACC_OPTIONS;
		else
			val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}
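
	/*
	 * With SHIFT16 set, the MAC prepends two padding bytes to every
	 * received frame so the IP header lands 4-byte aligned; the Rx
	 * burst path is expected to strip them before handing the mbuf up.
	 */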

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* pause frame opcode/duration */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	/* Clear the individual (unicast) address hash filter */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;

	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}
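
/*
 * The value written to ECR above is saved in fep->enetfec_e_cntl so that
 * enetfec_disable() can later clear exactly the bits this function set.
 */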

static void
enet_free_buffers(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i, q;
	struct rte_mbuf *mbuf;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	struct enetfec_priv_tx_q *txq;

	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		rxq = fep->rx_queues[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			mbuf = rxq->rx_mbuf[i];
			rxq->rx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
			bdp = enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		txq = fep->tx_queues[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			mbuf = txq->tx_mbuf[i];
			txq->tx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
		}
	}
}
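
/*
 * enet_get_nextdesc()/enet_get_prevdesc() (defined with the driver's Rx/Tx
 * code) step through a ring using the bufdesc_prop fields initialized in the
 * queue-setup functions below. A minimal sketch of the forward walk, assuming
 * that layout:
 *
 *	static struct bufdesc *
 *	enet_get_nextdesc(struct bufdesc *bdp, struct bufdesc_prop *bd)
 *	{
 *		return (bdp >= bd->last) ? bd->base :
 *			(struct bufdesc *)((uintptr_t)bdp + bd->d_size);
 *	}
 */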

static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		fep->flag_csum |= RX_FLAG_CSUM_EN;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);
	dev->rx_pkt_burst = &enetfec_recv_pkts;
	dev->tx_pkt_burst = &enetfec_xmit_pkts;

	return 0;
}
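
/*
 * enetfec_recv_pkts()/enetfec_xmit_pkts() are the burst handlers implemented
 * elsewhere in this driver; they are only wired up once the MAC has been
 * (re)started above.
 */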

/* ENETFEC disable function.
 * @param[in] fep ENETFEC private data
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		& ~(fep->enetfec_e_cntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

static int
enetfec_eth_close(struct rte_eth_dev *dev)
{
	enet_free_buffers(dev);
	return 0;
}

static int
enetfec_eth_link_update(struct rte_eth_dev *dev,
		int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	unsigned int lstatus = 1;

	memset(&link, 0, sizeof(struct rte_eth_link));

	/* No PHY management: the link is always reported up at 1G */
	link.link_status = lstatus;
	link.link_speed = RTE_ETH_SPEED_NUM_1G;

	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
			"Up");

	return rte_eth_linkstatus_set(dev, &link);
}

static int
enetfec_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t tmp;

	/* Set the PROM bit in RCR to accept all frames */
	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
	tmp |= 0x8;
	rte_write32(rte_cpu_to_le_32(tmp),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	return 0;
}

static int
enetfec_multicast_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	rte_write32(rte_cpu_to_le_32(0xffffffff),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0xffffffff),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
	dev->data->all_multicast = 1;

	rte_write32(rte_cpu_to_le_32(0x04400002),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0x10800049),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);

	return 0;
}
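
/*
 * Note: GAUR/GALR form the 64-bit group (multicast) hash filter. Writing all
 * ones in enetfec_multicast_enable() accepts every multicast address; the
 * second pair of writes then loads the driver's fixed hash pattern.
 */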

/* Set a MAC change in hardware. */
static int
enetfec_set_mac_address(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr)
{
	struct enetfec_private *fep = dev->data->dev_private;

	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
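
	/*
	 * Byte packing example: for MAC 01:02:03:04:05:06 the writes above
	 * produce PALR = 0x01020304 and PAUR = 0x05060000 (upper 16 bits;
	 * the low half of PAUR is left as written).
	 */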

	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

static int
enetfec_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetfec_private *fep = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &fep->stats;

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->ierrors = eth_stats->ierrors;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;
	stats->oerrors = eth_stats->oerrors;
	stats->rx_nombuf = eth_stats->rx_nombuf;

	return 0;
}

static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;

	return 0;
}

static void
enet_free_queue(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(fep->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(fep->tx_queues[i]);
}

static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};
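
/*
 * Per-queue "descriptor active" registers: writing any value to RDAR/TDAR
 * tells the MAC that new ready descriptors are available in that ring.
 */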

static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	/* "last" is the final descriptor: base + (ring_size - 1) * dsize */
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
	bdp = txq->bd.base;

	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);

	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
	return 0;
}
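
/*
 * Typical application-side usage (a sketch, not part of this driver):
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *
 * An nb_desc beyond MAX_TX_BD_RING_SIZE is silently clamped above.
 */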

static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	if (queue_idx >= ENETFEC_MAX_Q) {
		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d",
			queue_idx, ENETFEC_MAX_Q);
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	bdp = rxq->bd.base;
	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf failed");
			goto err_alloc;
		}

		/* Get the virtual address & physical address */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);

	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
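	/* Poking RDAR (any value) tells the MAC the ring now holds
	 * empty descriptors ready to be filled.
	 */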
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure = enetfec_eth_configure,
	.dev_start = enetfec_eth_start,
	.dev_stop = enetfec_eth_stop,
	.dev_close = enetfec_eth_close,
	.link_update = enetfec_eth_link_update,
	.promiscuous_enable = enetfec_promiscuous_enable,
	.allmulticast_enable = enetfec_multicast_enable,
	.mac_addr_set = enetfec_set_mac_address,
	.stats_get = enetfec_stats_get,
	.dev_infos_get = enetfec_eth_info,
	.rx_queue_setup = enetfec_rx_queue_setup,
	.tx_queue_setup = enetfec_tx_queue_setup
};

static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);

	return 0;
}

static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;
	struct rte_ether_addr macaddr = {
		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
	};

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC | QUIRK_SUPPORT_DELAYED_CLKS;

	rc = enetfec_configure();
	if (rc != 0)
		return -ENOMEM;
	rc = config_enetfec_uio(fep);
	if (rc != 0)
		return -ENOMEM;

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
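
	/*
	 * Layout example: with bd_size = 6 * S, the region is consumed as
	 * [Tx0 | Tx1 | Tx2 | Rx0 | Rx1 | Rx2], each slice S = bdsize bytes,
	 * with bd_addr_v/bd_addr_p advancing in lock step (virtual vs. bus
	 * address of the same slice).
	 */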

	/* Copy the station address into the dev structure */
	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			RTE_ETHER_ADDR_LEN);
		rc = -ENOMEM;
		goto err;
	}

	/* Set default MAC address */
	enetfec_set_mac_address(dev, &macaddr);

	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
err:
	rte_eth_dev_release_port(dev);
	return rc;
}

static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct enetfec_private *fep;
	struct enetfec_priv_rx_q *rxq;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	fep = eth_dev->data->dev_private;
	/* Free descriptor base of first RX queue as it was configured
	 * first in enetfec_eth_init().
	 */
	rxq = fep->rx_queues[0];
	rte_free(rxq->bd.base);
	enet_free_queue(eth_dev);
	enetfec_eth_stop(eth_dev);

	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	ENETFEC_PMD_INFO("Release enetfec sw device");
	enetfec_cleanup(fep);

	return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);