/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <ethdev_vdev.h>
#include <ethdev_driver.h>

#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD		net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
/* SHIFT16 makes the MAC prepend two pad bytes to each received frame so
 * that the IP header ends up 32-bit aligned.
 */
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

#define NUM_OF_BD_QUEUES		6
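
/* The buffer-descriptor memory obtained from the UIO layer is carved
 * into NUM_OF_BD_QUEUES equal slices in pmd_enetfec_probe() below:
 * one slice per Tx ring and one per Rx ring (ENETFEC_MAX_Q of each).
 */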

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;

	/* Clear any outstanding interrupt. */
	rte_write32(rte_cpu_to_le_32(0xffffffff),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	/* Enable MII mode */
	if (fep->full_duplex == FULL_DUPLEX) {
		/* FD enable */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}

	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* align IP header */
		val |= ENETFEC_RACC_SHIFT16;
		val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* set maximum frame truncation length */
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* OPD */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	/* clear the individual-address hash table */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;
	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);

	return 0;
}

/*
 * ENETFEC disable function.
 * @param[in] fep	ENETFEC private data
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	/* Clear only the bits enabled at start; leave the rest of ECR. */
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		& ~(fep->enetfec_e_cntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;

	return 0;
}
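
/* Per-queue "descriptor active" register offsets (RDAR/TDAR): the MAC
 * polls a ring only after software writes to the matching register to
 * signal that fresh Rx/Tx descriptors are available.
 */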
static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	txq->fep = fep;
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];

	return 0;
}

static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	rxq->pool = mb_pool;
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	rxq->fep = fep;
	bdp = rxq->bd.base;

	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf failed");
			goto err_alloc;
		}

		/* Get the virtual address & physical address */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure = enetfec_eth_configure,
	.dev_start = enetfec_eth_start,
	.dev_stop = enetfec_eth_stop,
	.dev_infos_get = enetfec_eth_info,
	.rx_queue_setup = enetfec_rx_queue_setup,
	.tx_queue_setup = enetfec_tx_queue_setup
};

static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);

	return 0;
}

static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;
	fep->dev = dev;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC;

	rc = enetfec_configure();
	if (rc != 0)
		return -ENOMEM;
	rc = config_enetfec_uio(fep);
	if (rc != 0)
		return -ENOMEM;

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}

	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
	return rc;
}

static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	ENETFEC_PMD_INFO("Release enetfec sw device");
	return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);
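
/*
 * Usage sketch (illustration only, not part of the driver): as a vdev,
 * this PMD is instantiated by name on the EAL command line, e.g.
 *
 *   dpdk-testpmd --vdev=net_enetfec -- -i
 *
 * or programmatically with rte_vdev_init("net_enetfec", NULL), assuming
 * the UIO resources probed by config_enetfec_uio() are present.
 */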