dpdk.git: drivers/net/enetfec/enet_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD                net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS              RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS             RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16            RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS            (ENETFEC_RACC_IPDIS | \
                                                ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG      0x1
#define ENETFEC_PAUSE_FLAG_ENABLE       0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE                     RTE_BIT32(5)
#define ENETFEC_RSEM_V                  0x84
#define ENETFEC_RSFL_V                  16
#define ENETFEC_RAEM_V                  0x8
#define ENETFEC_RAFL_V                  0x8
#define ENETFEC_OPD_V                   0xFFF0

/* Extended buffer descriptor */
#define ENETFEC_EXTENDED_BD             0
#define NUM_OF_BD_QUEUES                6

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
                RTE_ETH_RX_OFFLOAD_CHECKSUM |
                RTE_ETH_RX_OFFLOAD_VLAN;
/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;
        uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
        uint32_t ecntl = ENETFEC_ETHEREN;
        uint32_t val;

        /* Clear any outstanding interrupt. */
        writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

        /* Enable MII mode */
        if (fep->full_duplex == FULL_DUPLEX) {
                /* FD enable */
                rte_write32(rte_cpu_to_le_32(0x04),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
        } else {
                /* No Rcv on Xmit */
                rcntl |= 0x02;
                rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
        }

        if (fep->quirks & QUIRK_RACC) {
                val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
                /* align IP header */
                val |= ENETFEC_RACC_SHIFT16;
                if (fep->flag_csum & RX_FLAG_CSUM_EN)
                        /* set RX checksum */
                        val |= ENETFEC_RACC_OPTIONS;
                else
                        val &= ~ENETFEC_RACC_OPTIONS;
                rte_write32(rte_cpu_to_le_32(val),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
                rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
        }

        /*
         * The phy interface and speed need to get configured
         * differently on enet-mac.
         */
        if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
                /* Enable flow control and length check */
                rcntl |= 0x40000000 | 0x00000020;

                /* RGMII, RMII or MII */
                rcntl |= RTE_BIT32(6);
                ecntl |= RTE_BIT32(5);
        }

        /* enable pause frame */
        if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
                ((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
                /*&& ndev->phydev && ndev->phydev->pause*/)) {
                rcntl |= ENETFEC_FCE;

                /* set FIFO threshold parameter to reduce overrun */
                rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
                rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
                rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
                rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

                /* OPD */
                rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
        } else {
                rcntl &= ~ENETFEC_FCE;
        }

        rte_write32(rte_cpu_to_le_32(rcntl),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

        rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
        rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

        if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
                /* enable ENETFEC endian swap */
                ecntl |= (1 << 8);
                /* enable ENETFEC store and forward mode */
                rte_write32(rte_cpu_to_le_32(1 << 8),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
        }
        if (fep->bufdesc_ex)
                ecntl |= (1 << 4);
        if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
                fep->rgmii_txc_delay)
                ecntl |= ENETFEC_TXC_DLY;
        if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
                fep->rgmii_rxc_delay)
                ecntl |= ENETFEC_RXC_DLY;
        /* Enable the MIB statistic event counters */
        rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

        ecntl |= 0x70000000;
        fep->enetfec_e_cntl = ecntl;
        /* And last, enable the transmit and receive processing */
        rte_write32(rte_cpu_to_le_32(ecntl),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
        rte_delay_us(10);
}

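/* Free all the mbufs currently attached to the Rx and Tx buffer
 * descriptor rings of the device.
 */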
static void
enet_free_buffers(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;
        unsigned int i, q;
        struct rte_mbuf *mbuf;
        struct bufdesc  *bdp;
        struct enetfec_priv_rx_q *rxq;
        struct enetfec_priv_tx_q *txq;

        for (q = 0; q < dev->data->nb_rx_queues; q++) {
                rxq = fep->rx_queues[q];
                bdp = rxq->bd.base;
                for (i = 0; i < rxq->bd.ring_size; i++) {
                        mbuf = rxq->rx_mbuf[i];
                        rxq->rx_mbuf[i] = NULL;
                        rte_pktmbuf_free(mbuf);
                        bdp = enet_get_nextdesc(bdp, &rxq->bd);
                }
        }

        for (q = 0; q < dev->data->nb_tx_queues; q++) {
                txq = fep->tx_queues[q];
                bdp = txq->bd.base;
                for (i = 0; i < txq->bd.ring_size; i++) {
                        mbuf = txq->tx_mbuf[i];
                        txq->tx_mbuf[i] = NULL;
                        rte_pktmbuf_free(mbuf);
                }
        }
}

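/* Apply the Rx offload configuration requested by the application.
 * Only Rx checksum offload is acted upon; KEEP_CRC is not supported.
 */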
static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;

        if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
                fep->flag_csum |= RX_FLAG_CSUM_EN;

        if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

        return 0;
}

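/* Start the device: (re)program the MAC and install the Rx/Tx burst
 * functions.
 */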
static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
        enetfec_restart(dev);
        dev->rx_pkt_burst = &enetfec_recv_pkts;
        dev->tx_pkt_burst = &enetfec_xmit_pkts;

        return 0;
}

/* ENETFEC disable function.
 * @param[in] fep       ENETFEC private data
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
        rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
                    & ~(fep->enetfec_e_cntl),
                    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

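/* Stop the device: clear the started flag and disable the MAC bits that
 * were enabled at start time.
 */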
static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;

        dev->data->dev_started = 0;
        enetfec_disable(fep);

        return 0;
}

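/* Close the device: release the mbufs held by the Rx/Tx rings. */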
static int
enetfec_eth_close(struct rte_eth_dev *dev)
{
        enet_free_buffers(dev);
        return 0;
}

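/* Link update: the PMD does no PHY handling, so the link is always
 * reported as up at 1 Gbps.
 */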
static int
enetfec_eth_link_update(struct rte_eth_dev *dev,
                        int wait_to_complete __rte_unused)
{
        struct rte_eth_link link;
        unsigned int lstatus = 1;

        memset(&link, 0, sizeof(struct rte_eth_link));

        link.link_status = lstatus;
        link.link_speed = RTE_ETH_SPEED_NUM_1G;

        ENETFEC_PMD_INFO("Port (%d) link is Up", dev->data->port_id);

        return rte_eth_linkstatus_set(dev, &link);
}

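/* Enable promiscuous mode: set the PROM bit in the Rx control register
 * so all frames are accepted.
 */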
static int
enetfec_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;
        uint32_t tmp;

        tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
        tmp |= 0x8;
        tmp &= ~0x2;
        rte_write32(rte_cpu_to_le_32(tmp),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

        return 0;
}

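/* Enable all-multicast mode by programming the group address hash
 * registers.
 */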
static int
enetfec_multicast_enable(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;

        rte_write32(rte_cpu_to_le_32(0xffffffff),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
        rte_write32(rte_cpu_to_le_32(0xffffffff),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
        dev->data->all_multicast = 1;

        rte_write32(rte_cpu_to_le_32(0x04400002),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
        rte_write32(rte_cpu_to_le_32(0x10800049),
                        (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);

        return 0;
}

/* Set a MAC change in hardware. */
static int
enetfec_set_mac_address(struct rte_eth_dev *dev,
                    struct rte_ether_addr *addr)
{
        struct enetfec_private *fep = dev->data->dev_private;

        writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
                (addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
        writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);

        rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

        return 0;
}

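/* Return the software statistics maintained by the Rx/Tx burst
 * functions.
 */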
static int
enetfec_stats_get(struct rte_eth_dev *dev,
              struct rte_eth_stats *stats)
{
        struct enetfec_private *fep = dev->data->dev_private;
        struct rte_eth_stats *eth_stats = &fep->stats;

        stats->ipackets = eth_stats->ipackets;
        stats->ibytes = eth_stats->ibytes;
        stats->ierrors = eth_stats->ierrors;
        stats->opackets = eth_stats->opackets;
        stats->obytes = eth_stats->obytes;
        stats->oerrors = eth_stats->oerrors;
        stats->rx_nombuf = eth_stats->rx_nombuf;

        return 0;
}

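/* Report device capabilities: maximum frame length, queue counts and
 * supported Rx offloads.
 */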
static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
{
        dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
        dev_info->max_rx_queues = ENETFEC_MAX_Q;
        dev_info->max_tx_queues = ENETFEC_MAX_Q;
        dev_info->rx_offload_capa = dev_rx_offloads_sup;
        return 0;
}

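/* Free the Rx and Tx queue structures allocated during queue setup. */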
static void
enet_free_queue(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                rte_free(fep->rx_queues[i]);
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_free(fep->tx_queues[i]);
}

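/* Per-queue "descriptor active" register offsets. */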
static const unsigned short offset_des_active_rxq[] = {
        ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
        ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

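/* Set up a Tx queue: allocate the queue structure, point it at its slice
 * of the buffer descriptor memory and initialize every descriptor.
 */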
static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id __rte_unused,
                        const struct rte_eth_txconf *tx_conf)
{
        struct enetfec_private *fep = dev->data->dev_private;
        unsigned int i;
        struct bufdesc *bdp, *bd_base;
        struct enetfec_priv_tx_q *txq;
        unsigned int size;
        unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
                sizeof(struct bufdesc);
        unsigned int dsize_log2 = fls64(dsize);

        /* Tx deferred start is not supported */
        if (tx_conf->tx_deferred_start) {
                ENETFEC_PMD_ERR("Tx deferred start not supported");
                return -EINVAL;
        }

        /* allocate transmit queue */
        txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
                ENETFEC_PMD_ERR("transmit queue allocation failed");
                return -ENOMEM;
        }

        if (nb_desc > MAX_TX_BD_RING_SIZE) {
                nb_desc = MAX_TX_BD_RING_SIZE;
                ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
        }
        txq->bd.ring_size = nb_desc;
        fep->total_tx_ring_size += txq->bd.ring_size;
        fep->tx_queues[queue_idx] = txq;

        rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

        /* Set transmit descriptor base. */
        txq = fep->tx_queues[queue_idx];
        txq->fep = fep;
        size = dsize * txq->bd.ring_size;
        bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
        txq->bd.queue_id = queue_idx;
        txq->bd.base = bd_base;
        txq->bd.cur = bd_base;
        txq->bd.d_size = dsize;
        txq->bd.d_size_log2 = dsize_log2;
        txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
                        offset_des_active_txq[queue_idx];
        bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
        txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
        bdp = txq->bd.base;

        for (i = 0; i < txq->bd.ring_size; i++) {
                /* Initialize the BD for every fragment in the page. */
                rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
                if (txq->tx_mbuf[i] != NULL) {
                        rte_pktmbuf_free(txq->tx_mbuf[i]);
                        txq->tx_mbuf[i] = NULL;
                }
                rte_write32(0, &bdp->bd_bufaddr);
                bdp = enet_get_nextdesc(bdp, &txq->bd);
        }

        /* Set the last buffer to wrap */
        bdp = enet_get_prevdesc(bdp, &txq->bd);
        rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
                rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
        txq->dirty_tx = bdp;
        dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
        return 0;
}

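/* Set up an Rx queue: allocate the queue structure, attach an mbuf to
 * every descriptor and mark the descriptors ready for reception.
 */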
static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_rx_desc,
                        unsigned int socket_id __rte_unused,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mb_pool)
{
        struct enetfec_private *fep = dev->data->dev_private;
        unsigned int i;
        struct bufdesc *bd_base;
        struct bufdesc *bdp;
        struct enetfec_priv_rx_q *rxq;
        unsigned int size;
        unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
                        sizeof(struct bufdesc);
        unsigned int dsize_log2 = fls64(dsize);

        /* Rx deferred start is not supported */
        if (rx_conf->rx_deferred_start) {
                ENETFEC_PMD_ERR("Rx deferred start not supported");
                return -EINVAL;
        }

        /* allocate receive queue */
        rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
        if (rxq == NULL) {
                ENETFEC_PMD_ERR("receive queue allocation failed");
                return -ENOMEM;
        }

        if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
                nb_rx_desc = MAX_RX_BD_RING_SIZE;
                ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
        }

        rxq->bd.ring_size = nb_rx_desc;
        fep->total_rx_ring_size += rxq->bd.ring_size;
        fep->rx_queues[queue_idx] = rxq;

        rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
        rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
                (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

        /* Set receive descriptor base. */
        rxq = fep->rx_queues[queue_idx];
        rxq->pool = mb_pool;
        size = dsize * rxq->bd.ring_size;
        bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
        rxq->bd.queue_id = queue_idx;
        rxq->bd.base = bd_base;
        rxq->bd.cur = bd_base;
        rxq->bd.d_size = dsize;
        rxq->bd.d_size_log2 = dsize_log2;
        rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
                        offset_des_active_rxq[queue_idx];
        bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
        rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

        rxq->fep = fep;
        bdp = rxq->bd.base;
        rxq->bd.cur = bdp;

        for (i = 0; i < nb_rx_desc; i++) {
                /* Initialize Rx buffers from pktmbuf pool */
                struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
                if (mbuf == NULL) {
                        ENETFEC_PMD_ERR("mbuf allocation failed");
                        goto err_alloc;
                }

                /* Get the virtual address & physical address */
                rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
                        &bdp->bd_bufaddr);

                rxq->rx_mbuf[i] = mbuf;
                rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

                bdp = enet_get_nextdesc(bdp, &rxq->bd);
        }

        /* Initialize the receive buffer descriptors. */
        bdp = rxq->bd.cur;
        for (i = 0; i < rxq->bd.ring_size; i++) {
                /* Initialize the BD for every fragment in the page. */
                if (rte_read32(&bdp->bd_bufaddr) > 0)
                        rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
                                &bdp->bd_sc);
                else
                        rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

                bdp = enet_get_nextdesc(bdp, &rxq->bd);
        }

        /* Set the last buffer to wrap */
        bdp = enet_get_prevdesc(bdp, &rxq->bd);
        rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
                rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
        dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
        rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
        return 0;

err_alloc:
        for (i = 0; i < nb_rx_desc; i++) {
                if (rxq->rx_mbuf[i] != NULL) {
                        rte_pktmbuf_free(rxq->rx_mbuf[i]);
                        rxq->rx_mbuf[i] = NULL;
                }
        }
        rte_free(rxq);
        return -ENOMEM;
}

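/* Device operations exported by the ENETFEC PMD. */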
static const struct eth_dev_ops enetfec_ops = {
        .dev_configure          = enetfec_eth_configure,
        .dev_start              = enetfec_eth_start,
        .dev_stop               = enetfec_eth_stop,
        .dev_close              = enetfec_eth_close,
        .link_update            = enetfec_eth_link_update,
        .promiscuous_enable     = enetfec_promiscuous_enable,
        .allmulticast_enable    = enetfec_multicast_enable,
        .mac_addr_set           = enetfec_set_mac_address,
        .stats_get              = enetfec_stats_get,
        .dev_infos_get          = enetfec_eth_info,
        .rx_queue_setup         = enetfec_rx_queue_setup,
        .tx_queue_setup         = enetfec_tx_queue_setup
};

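/* Final ethdev initialization: set defaults and hook up the ops table. */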
static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
        struct enetfec_private *fep = dev->data->dev_private;

        fep->full_duplex = FULL_DUPLEX;
        dev->dev_ops = &enetfec_ops;
        rte_eth_dev_probing_finish(dev);

        return 0;
}

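/* Probe the vdev: allocate the ethdev, map the ENETFEC registers and
 * buffer descriptor memory through UIO, and split the descriptor area
 * between the Rx and Tx queues.
 */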
static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
        struct rte_eth_dev *dev = NULL;
        struct enetfec_private *fep;
        const char *name;
        int rc;
        int i;
        unsigned int bdsize;
        struct rte_ether_addr macaddr = {
                .addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
        };

        name = rte_vdev_device_name(vdev);
        ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

        dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
        if (dev == NULL)
                return -ENOMEM;

        /* setup board info structure */
        fep = dev->data->dev_private;
        fep->dev = dev;

        fep->max_rx_queues = ENETFEC_MAX_Q;
        fep->max_tx_queues = ENETFEC_MAX_Q;
        fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
                | QUIRK_RACC;

        rc = enetfec_configure();
        if (rc != 0)
                return -ENOMEM;
        rc = config_enetfec_uio(fep);
        if (rc != 0)
                return -ENOMEM;

        /* Get the BD size for distributing among six queues */
        bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

        for (i = 0; i < fep->max_tx_queues; i++) {
                fep->dma_baseaddr_t[i] = fep->bd_addr_v;
                fep->bd_addr_p_t[i] = fep->bd_addr_p;
                fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
                fep->bd_addr_p = fep->bd_addr_p + bdsize;
        }
        for (i = 0; i < fep->max_rx_queues; i++) {
                fep->dma_baseaddr_r[i] = fep->bd_addr_v;
                fep->bd_addr_p_r[i] = fep->bd_addr_p;
                fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
                fep->bd_addr_p = fep->bd_addr_p + bdsize;
        }

        /* Copy the station address into the dev structure. */
        dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
        if (dev->data->mac_addrs == NULL) {
                ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
                        RTE_ETHER_ADDR_LEN);
                rc = -ENOMEM;
                goto err;
        }

        /*
         * Set default mac address
         */
        enetfec_set_mac_address(dev, &macaddr);

        fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
        rc = enetfec_eth_init(dev);
        if (rc)
                goto failed_init;

        return 0;

failed_init:
        ENETFEC_PMD_ERR("Failed to init");
err:
        rte_eth_dev_release_port(dev);
        return rc;
}

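/* Remove the vdev: free the queue structures and descriptor memory,
 * stop the controller and release the ethdev port.
 */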
static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
        struct rte_eth_dev *eth_dev = NULL;
        struct enetfec_private *fep;
        struct enetfec_priv_rx_q *rxq;
        int ret;

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
        if (eth_dev == NULL)
                return -ENODEV;

        fep = eth_dev->data->dev_private;
        /* Free descriptor base of first RX queue as it was configured
         * first in enetfec_eth_init().
         */
        rxq = fep->rx_queues[0];
        rte_free(rxq->bd.base);
        enet_free_queue(eth_dev);
        enetfec_eth_stop(eth_dev);

        ret = rte_eth_dev_release_port(eth_dev);
        if (ret != 0)
                return -EINVAL;

        ENETFEC_PMD_INFO("Release enetfec sw device");
        enetfec_cleanup(fep);

        return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
        .probe = pmd_enetfec_probe,
        .remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);