net/dpaa: support link status update
[dpdk.git] / drivers / net / dpaa / dpaa_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright 2017 NXP.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
43
44 #include <rte_config.h>
45 #include <rte_byteorder.h>
46 #include <rte_common.h>
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_tailq.h>
56 #include <rte_eal.h>
57 #include <rte_alarm.h>
58 #include <rte_ether.h>
59 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_ring.h>
62
63 #include <rte_dpaa_bus.h>
64 #include <rte_dpaa_logs.h>
65 #include <dpaa_mempool.h>
66
67 #include <dpaa_ethdev.h>
68 #include <dpaa_rxtx.h>
69
70 #include <fsl_usd.h>
71 #include <fsl_qman.h>
72 #include <fsl_bman.h>
73 #include <fsl_fman.h>
74
75 /* Keep track of whether QMAN and BMAN have been globally initialized */
76 static int is_global_init;
77
78 static int
79 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
80 {
81         struct dpaa_if *dpaa_intf = dev->data->dev_private;
82
83         PMD_INIT_FUNC_TRACE();
84
85         if (mtu < ETHER_MIN_MTU)
86                 return -EINVAL;
87         if (mtu > ETHER_MAX_LEN)
88                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
89         else
90                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
91
92         dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu;
93
94         fman_if_set_maxfrm(dpaa_intf->fif, mtu);
95
96         return 0;
97 }
98
99 static int
100 dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
101 {
102         PMD_INIT_FUNC_TRACE();
103
104         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
105                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
106                     DPAA_MAX_RX_PKT_LEN)
107                         return dpaa_mtu_set(dev,
108                                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
109                 else
110                         return -1;
111         }
112         return 0;
113 }
114
115 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
116 {
117         struct dpaa_if *dpaa_intf = dev->data->dev_private;
118
119         PMD_INIT_FUNC_TRACE();
120
121         /* Change tx callback to the real one */
122         dev->tx_pkt_burst = dpaa_eth_queue_tx;
123         fman_if_enable_rx(dpaa_intf->fif);
124
125         return 0;
126 }
127
128 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
129 {
130         struct dpaa_if *dpaa_intf = dev->data->dev_private;
131
132         PMD_INIT_FUNC_TRACE();
133
134         fman_if_disable_rx(dpaa_intf->fif);
135         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
136 }
137
/* Close the device; currently equivalent to stopping it. */
static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
}
144
145 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
146                                 int wait_to_complete __rte_unused)
147 {
148         struct dpaa_if *dpaa_intf = dev->data->dev_private;
149         struct rte_eth_link *link = &dev->data->dev_link;
150
151         PMD_INIT_FUNC_TRACE();
152
153         if (dpaa_intf->fif->mac_type == fman_mac_1g)
154                 link->link_speed = 1000;
155         else if (dpaa_intf->fif->mac_type == fman_mac_10g)
156                 link->link_speed = 10000;
157         else
158                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
159                              dpaa_intf->name, dpaa_intf->fif->mac_type);
160
161         link->link_status = dpaa_intf->valid;
162         link->link_duplex = ETH_LINK_FULL_DUPLEX;
163         link->link_autoneg = ETH_LINK_AUTONEG;
164         return 0;
165 }
166
/* Set up an Rx queue.
 *
 * The queue itself (a QMan frame queue) was already created in
 * dpaa_dev_init(); this callback only publishes the slot to the ethdev
 * layer and, on first use of a given mempool, programs the FMAN buffer
 * pool and frame-descriptor layout for the whole interface.
 *
 * NOTE(review): the fman_if_* calls below appear order-dependent
 * hardware configuration (ic params, then fd offset, then buffer pool)
 * -- confirm before reordering.
 */
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf __rte_unused,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

	/* Hardware is (re)programmed only when the mempool changes */
	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		/* pool_data is set only for DPAA-offloaded mempools */
		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(dpaa_intf->fif, &icp);

		/* Frame data starts after the mbuf headroom plus the
		 * hardware-reserved annotation area.
		 */
		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

		/* Buffer pool size should be equal to Dataroom Size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(dpaa_intf->fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		/* Interface is usable (reported up by link_update) once
		 * an Rx queue has been configured.
		 */
		dpaa_intf->valid = 1;
		DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
			    dpaa_intf->name, fd_offset,
			fman_if_get_fdoff(dpaa_intf->fif));
	}
	dev->data->rx_queues[queue_idx] = &dpaa_intf->rx_queues[queue_idx];

	return 0;
}
214
215 static
216 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
217 {
218         PMD_INIT_FUNC_TRACE();
219 }
220
221 static
222 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
223                             uint16_t nb_desc __rte_unused,
224                 unsigned int socket_id __rte_unused,
225                 const struct rte_eth_txconf *tx_conf __rte_unused)
226 {
227         struct dpaa_if *dpaa_intf = dev->data->dev_private;
228
229         PMD_INIT_FUNC_TRACE();
230
231         DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
232         dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
233         return 0;
234 }
235
236 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
237 {
238         PMD_INIT_FUNC_TRACE();
239 }
240
/* Take the link down. No dedicated PHY control is available, so this
 * maps to stopping the device.
 */
static int dpaa_link_down(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_stop(dev);
	return 0;
}
248
/* Bring the link up. No dedicated PHY control is available, so this
 * maps to starting the device.
 */
static int dpaa_link_up(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	dpaa_eth_dev_start(dev);
	return 0;
}
256
/* Callback table wiring the DPAA PMD entry points into the ethdev API */
static struct eth_dev_ops dpaa_devops = {
	.dev_configure            = dpaa_eth_dev_configure,
	.dev_start                = dpaa_eth_dev_start,
	.dev_stop                 = dpaa_eth_dev_stop,
	.dev_close                = dpaa_eth_dev_close,

	.rx_queue_setup           = dpaa_eth_rx_queue_setup,
	.tx_queue_setup           = dpaa_eth_tx_queue_setup,
	.rx_queue_release         = dpaa_eth_rx_queue_release,
	.tx_queue_release         = dpaa_eth_tx_queue_release,

	.link_update              = dpaa_eth_link_update,
	.mtu_set                  = dpaa_mtu_set,
	.dev_set_link_down        = dpaa_link_down,
	.dev_set_link_up          = dpaa_link_up,
};
273
274 /* Initialise an Rx FQ */
275 static int dpaa_rx_queue_init(struct qman_fq *fq,
276                               uint32_t fqid)
277 {
278         struct qm_mcc_initfq opts;
279         int ret;
280
281         PMD_INIT_FUNC_TRACE();
282
283         ret = qman_reserve_fqid(fqid);
284         if (ret) {
285                 DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
286                              fqid, ret);
287                 return -EINVAL;
288         }
289
290         DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
291         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
292         if (ret) {
293                 DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
294                         fqid, ret);
295                 return ret;
296         }
297
298         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
299                        QM_INITFQ_WE_CONTEXTA;
300
301         opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
302         opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
303                            QM_FQCTRL_PREFERINCACHE;
304         opts.fqd.context_a.stashing.exclusive = 0;
305         opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
306         opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
307         opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
308
309         /*Enable tail drop */
310         opts.we_mask = opts.we_mask | QM_INITFQ_WE_TDTHRESH;
311         opts.fqd.fq_ctrl = opts.fqd.fq_ctrl | QM_FQCTRL_TDE;
312         qm_fqd_taildrop_set(&opts.fqd.td, CONG_THRESHOLD_RX_Q, 1);
313
314         ret = qman_init_fq(fq, 0, &opts);
315         if (ret)
316                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
317         return ret;
318 }
319
320 /* Initialise a Tx FQ */
321 static int dpaa_tx_queue_init(struct qman_fq *fq,
322                               struct fman_if *fman_intf)
323 {
324         struct qm_mcc_initfq opts;
325         int ret;
326
327         PMD_INIT_FUNC_TRACE();
328
329         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
330                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
331         if (ret) {
332                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
333                 return ret;
334         }
335         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
336                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
337         opts.fqd.dest.channel = fman_intf->tx_channel_id;
338         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
339         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
340         opts.fqd.context_b = 0;
341         /* no tx-confirmation */
342         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
343         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
344         DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
345         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
346         if (ret)
347                 DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
348         return ret;
349 }
350
351 /* Initialise a network interface */
352 static int
353 dpaa_dev_init(struct rte_eth_dev *eth_dev)
354 {
355         int num_cores, num_rx_fqs, fqid;
356         int loop, ret = 0;
357         int dev_id;
358         struct rte_dpaa_device *dpaa_device;
359         struct dpaa_if *dpaa_intf;
360         struct fm_eth_port_cfg *cfg;
361         struct fman_if *fman_intf;
362         struct fman_if_bpool *bp, *tmp_bp;
363
364         PMD_INIT_FUNC_TRACE();
365
366         /* For secondary processes, the primary has done all the work */
367         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
368                 return 0;
369
370         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
371         dev_id = dpaa_device->id.dev_id;
372         dpaa_intf = eth_dev->data->dev_private;
373         cfg = &dpaa_netcfg->port_cfg[dev_id];
374         fman_intf = cfg->fman_if;
375
376         dpaa_intf->name = dpaa_device->name;
377
378         /* save fman_if & cfg in the interface struture */
379         dpaa_intf->fif = fman_intf;
380         dpaa_intf->ifid = dev_id;
381         dpaa_intf->cfg = cfg;
382
383         /* Initialize Rx FQ's */
384         if (getenv("DPAA_NUM_RX_QUEUES"))
385                 num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
386         else
387                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
388
389         /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
390          * queues.
391          */
392         if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
393                 DPAA_PMD_ERR("Invalid number of RX queues\n");
394                 return -EINVAL;
395         }
396
397         dpaa_intf->rx_queues = rte_zmalloc(NULL,
398                 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
399         for (loop = 0; loop < num_rx_fqs; loop++) {
400                 fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
401                         DPAA_PCD_FQID_MULTIPLIER + loop;
402                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop], fqid);
403                 if (ret)
404                         return ret;
405                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
406         }
407         dpaa_intf->nb_rx_queues = num_rx_fqs;
408
409         /* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
410         num_cores = rte_lcore_count();
411         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
412                 num_cores, MAX_CACHELINE);
413         if (!dpaa_intf->tx_queues)
414                 return -ENOMEM;
415
416         for (loop = 0; loop < num_cores; loop++) {
417                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
418                                          fman_intf);
419                 if (ret)
420                         return ret;
421                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
422         }
423         dpaa_intf->nb_tx_queues = num_cores;
424
425         DPAA_PMD_DEBUG("All frame queues created");
426
427         /* reset bpool list, initialize bpool dynamically */
428         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
429                 list_del(&bp->node);
430                 rte_free(bp);
431         }
432
433         /* Populate ethdev structure */
434         eth_dev->dev_ops = &dpaa_devops;
435         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
436         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
437
438         /* Allocate memory for storing MAC addresses */
439         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
440                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
441         if (eth_dev->data->mac_addrs == NULL) {
442                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
443                                                 "store MAC addresses",
444                                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
445                 rte_free(dpaa_intf->rx_queues);
446                 rte_free(dpaa_intf->tx_queues);
447                 dpaa_intf->rx_queues = NULL;
448                 dpaa_intf->tx_queues = NULL;
449                 dpaa_intf->nb_rx_queues = 0;
450                 dpaa_intf->nb_tx_queues = 0;
451                 return -ENOMEM;
452         }
453
454         /* copy the primary mac address */
455         ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
456
457         RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
458                 dpaa_device->name,
459                 fman_intf->mac_addr.addr_bytes[0],
460                 fman_intf->mac_addr.addr_bytes[1],
461                 fman_intf->mac_addr.addr_bytes[2],
462                 fman_intf->mac_addr.addr_bytes[3],
463                 fman_intf->mac_addr.addr_bytes[4],
464                 fman_intf->mac_addr.addr_bytes[5]);
465
466         /* Disable RX mode */
467         fman_if_discard_rx_errors(fman_intf);
468         fman_if_disable_rx(fman_intf);
469         /* Disable promiscuous mode */
470         fman_if_promiscuous_disable(fman_intf);
471         /* Disable multicast */
472         fman_if_reset_mcast_filter_table(fman_intf);
473         /* Reset interface statistics */
474         fman_if_stats_reset(fman_intf);
475
476         return 0;
477 }
478
479 static int
480 dpaa_dev_uninit(struct rte_eth_dev *dev)
481 {
482         struct dpaa_if *dpaa_intf = dev->data->dev_private;
483
484         PMD_INIT_FUNC_TRACE();
485
486         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
487                 return -EPERM;
488
489         if (!dpaa_intf) {
490                 DPAA_PMD_WARN("Already closed or not started");
491                 return -1;
492         }
493
494         dpaa_eth_dev_close(dev);
495
496         /* release configuration memory */
497         if (dpaa_intf->fc_conf)
498                 rte_free(dpaa_intf->fc_conf);
499
500         rte_free(dpaa_intf->rx_queues);
501         dpaa_intf->rx_queues = NULL;
502
503         rte_free(dpaa_intf->tx_queues);
504         dpaa_intf->tx_queues = NULL;
505
506         /* free memory for storing MAC addresses */
507         rte_free(dev->data->mac_addrs);
508         dev->data->mac_addrs = NULL;
509
510         dev->dev_ops = NULL;
511         dev->rx_pkt_burst = NULL;
512         dev->tx_pkt_burst = NULL;
513
514         return 0;
515 }
516
/* Probe callback for the DPAA bus.
 *
 * Performs one-time QMan/BMan global initialisation, initialises a
 * portal for the calling thread, allocates the ethdev and its private
 * data, and hands off to dpaa_dev_init().
 *
 * @return 0 on success, negative errno on failure
 */
static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	/* In case of secondary process, the device is already configured
	 * and no further action is required, except portal initialization
	 * and verifying secondary attachment to port name.
	 */
	/* NOTE(review): despite the comment above, the secondary path
	 * returns before rte_dpaa_portal_init() is called -- confirm
	 * whether secondary processes get their portal initialised
	 * elsewhere.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		return 0;
	}

	if (!is_global_init) {
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_PMD_ERR("QMAN initialization failed: %d",
				     ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_PMD_ERR("BMAN initialization failed: %d",
				     ret);
			return ret;
		}

		is_global_init = 1;
	}

	/* Per-thread portal for the probing (master) lcore */
	ret = rte_dpaa_portal_init((void *)1);
	if (ret) {
		DPAA_PMD_ERR("Unable to initialize portal");
		return ret;
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (eth_dev == NULL)
		return -ENOMEM;

	eth_dev->data->dev_private = rte_zmalloc(
					"ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	eth_dev->device->driver = &dpaa_drv->driver;
	dpaa_dev->eth_dev = eth_dev;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0)
		return 0;

	/* init failed: undo the allocations made above */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);
	return diag;
}
591
592 static int
593 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
594 {
595         struct rte_eth_dev *eth_dev;
596
597         PMD_INIT_FUNC_TRACE();
598
599         eth_dev = dpaa_dev->eth_dev;
600         dpaa_dev_uninit(eth_dev);
601
602         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
603                 rte_free(eth_dev->data->dev_private);
604
605         rte_eth_dev_release_port(eth_dev);
606
607         return 0;
608 }
609
/* Driver object registered with the DPAA bus */
static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);