net/dpaa: support loopback API
[dpdk.git] / drivers / net / dpaa / dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_common.h>
20 #include <rte_interrupts.h>
21 #include <rte_log.h>
22 #include <rte_debug.h>
23 #include <rte_pci.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_memory.h>
27 #include <rte_tailq.h>
28 #include <rte_eal.h>
29 #include <rte_alarm.h>
30 #include <rte_ether.h>
31 #include <rte_ethdev.h>
32 #include <rte_malloc.h>
33 #include <rte_ring.h>
34
35 #include <rte_dpaa_bus.h>
36 #include <rte_dpaa_logs.h>
37 #include <dpaa_mempool.h>
38
39 #include <dpaa_ethdev.h>
40 #include <dpaa_rxtx.h>
41 #include <rte_pmd_dpaa.h>
42
43 #include <fsl_usd.h>
44 #include <fsl_qman.h>
45 #include <fsl_bman.h>
46 #include <fsl_fman.h>
47
48 /* Keep track of whether QMAN and BMAN have been globally initialized */
49 static int is_global_init;
50
51 /* Per FQ Taildrop in frame count */
52 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
53
54 struct rte_dpaa_xstats_name_off {
55         char name[RTE_ETH_XSTATS_NAME_SIZE];
56         uint32_t offset;
57 };
58
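/*
 * Each entry below maps an xstats name to the byte offset of its counter in
 * struct dpaa_if_stats; the xstats callbacks read that block as an array of
 * 64-bit words and index it with offset / 8.
 */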
59 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
60         {"rx_align_err",
61                 offsetof(struct dpaa_if_stats, raln)},
62         {"rx_valid_pause",
63                 offsetof(struct dpaa_if_stats, rxpf)},
64         {"rx_fcs_err",
65                 offsetof(struct dpaa_if_stats, rfcs)},
66         {"rx_vlan_frame",
67                 offsetof(struct dpaa_if_stats, rvlan)},
68         {"rx_frame_err",
69                 offsetof(struct dpaa_if_stats, rerr)},
70         {"rx_drop_err",
71                 offsetof(struct dpaa_if_stats, rdrp)},
72         {"rx_undersized",
73                 offsetof(struct dpaa_if_stats, rund)},
74         {"rx_oversize_err",
75                 offsetof(struct dpaa_if_stats, rovr)},
76         {"rx_fragment_pkt",
77                 offsetof(struct dpaa_if_stats, rfrg)},
78         {"tx_valid_pause",
79                 offsetof(struct dpaa_if_stats, txpf)},
80         {"tx_fcs_err",
81                 offsetof(struct dpaa_if_stats, terr)},
82         {"tx_vlan_frame",
83                 offsetof(struct dpaa_if_stats, tvlan)},
84         {"tx_undersized",
85                 offsetof(struct dpaa_if_stats, tund)},
86 };
87
88 static struct rte_dpaa_driver rte_dpaa_pmd;
89
90 static int
91 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
92 {
93         struct dpaa_if *dpaa_intf = dev->data->dev_private;
94         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
95                                 + VLAN_TAG_SIZE;
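        /*
         * Illustrative example: with the standard 14-byte Ethernet header,
         * 4-byte CRC and 4-byte VLAN tag, an MTU of 1500 gives a frame_size
         * of 1522 bytes.
         */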
96
97         PMD_INIT_FUNC_TRACE();
98
99         if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
100                 return -EINVAL;
101         if (frame_size > ETHER_MAX_LEN)
102                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
103         else
104                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
105
106         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
107
108         fman_if_set_maxfrm(dpaa_intf->fif, frame_size);
109
110         return 0;
111 }
112
113 static int
114 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
115 {
116         struct dpaa_if *dpaa_intf = dev->data->dev_private;
117
118         PMD_INIT_FUNC_TRACE();
119
120         if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
121                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
122                     DPAA_MAX_RX_PKT_LEN) {
123                         fman_if_set_maxfrm(dpaa_intf->fif,
124                                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
125                         return 0;
126                 } else {
127                         return -1;
128                 }
129         }
130         return 0;
131 }
132
133 static const uint32_t *
134 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
135 {
136         static const uint32_t ptypes[] = {
137                 /* todo - add more types */
138                 RTE_PTYPE_L2_ETHER,
139                 RTE_PTYPE_L3_IPV4,
140                 RTE_PTYPE_L3_IPV4_EXT,
141                 RTE_PTYPE_L3_IPV6,
142                 RTE_PTYPE_L3_IPV6_EXT,
143                 RTE_PTYPE_L4_TCP,
144                 RTE_PTYPE_L4_UDP,
145                 RTE_PTYPE_L4_SCTP
146         };
147
148         PMD_INIT_FUNC_TRACE();
149
150         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
151                 return ptypes;
152         return NULL;
153 }
154
155 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
156 {
157         struct dpaa_if *dpaa_intf = dev->data->dev_private;
158
159         PMD_INIT_FUNC_TRACE();
160
161         /* Change tx callback to the real one */
162         dev->tx_pkt_burst = dpaa_eth_queue_tx;
163         fman_if_enable_rx(dpaa_intf->fif);
164
165         return 0;
166 }
167
168 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
169 {
170         struct dpaa_if *dpaa_intf = dev->data->dev_private;
171
172         PMD_INIT_FUNC_TRACE();
173
174         fman_if_disable_rx(dpaa_intf->fif);
175         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
176 }
177
178 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
179 {
180         PMD_INIT_FUNC_TRACE();
181
182         dpaa_eth_dev_stop(dev);
183 }
184
185 static int
186 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
187                      char *fw_version,
188                      size_t fw_size)
189 {
190         int ret;
191         FILE *svr_file = NULL;
192         unsigned int svr_ver = 0;
193
194         PMD_INIT_FUNC_TRACE();
195
196         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
197         if (!svr_file) {
198                 DPAA_PMD_ERR("Unable to open SoC device");
199                 return -ENOTSUP; /* Not supported on this infra */
200         }
201         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
202                 dpaa_svr_family = svr_ver & SVR_MASK;
203         else
204                 DPAA_PMD_ERR("Unable to read SoC device");
205
206         fclose(svr_file);
207
208         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
209                        svr_ver, fman_ip_rev);
210         ret += 1; /* add the size of '\0' */
211
212         if (fw_size < (uint32_t)ret)
213                 return ret;
214         else
215                 return 0;
216 }
217
218 static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
219                               struct rte_eth_dev_info *dev_info)
220 {
221         struct dpaa_if *dpaa_intf = dev->data->dev_private;
222
223         PMD_INIT_FUNC_TRACE();
224
225         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
226         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
227         dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
228         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
229         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
230         dev_info->max_hash_mac_addrs = 0;
231         dev_info->max_vfs = 0;
232         dev_info->max_vmdq_pools = ETH_16_POOLS;
233         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
234         dev_info->speed_capa = (ETH_LINK_SPEED_1G |
235                                 ETH_LINK_SPEED_10G);
236         dev_info->rx_offload_capa =
237                 (DEV_RX_OFFLOAD_IPV4_CKSUM |
238                 DEV_RX_OFFLOAD_UDP_CKSUM   |
239                 DEV_RX_OFFLOAD_TCP_CKSUM);
240         dev_info->tx_offload_capa =
241                 (DEV_TX_OFFLOAD_IPV4_CKSUM  |
242                 DEV_TX_OFFLOAD_UDP_CKSUM   |
243                 DEV_TX_OFFLOAD_TCP_CKSUM);
244 }
245
246 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
247                                 int wait_to_complete __rte_unused)
248 {
249         struct dpaa_if *dpaa_intf = dev->data->dev_private;
250         struct rte_eth_link *link = &dev->data->dev_link;
251
252         PMD_INIT_FUNC_TRACE();
253
254         if (dpaa_intf->fif->mac_type == fman_mac_1g)
255                 link->link_speed = 1000;
256         else if (dpaa_intf->fif->mac_type == fman_mac_10g)
257                 link->link_speed = 10000;
258         else
259                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
260                              dpaa_intf->name, dpaa_intf->fif->mac_type);
261
262         link->link_status = dpaa_intf->valid;
263         link->link_duplex = ETH_LINK_FULL_DUPLEX;
264         link->link_autoneg = ETH_LINK_AUTONEG;
265         return 0;
266 }
267
268 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
269                                struct rte_eth_stats *stats)
270 {
271         struct dpaa_if *dpaa_intf = dev->data->dev_private;
272
273         PMD_INIT_FUNC_TRACE();
274
275         fman_if_stats_get(dpaa_intf->fif, stats);
276         return 0;
277 }
278
279 static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
280 {
281         struct dpaa_if *dpaa_intf = dev->data->dev_private;
282
283         PMD_INIT_FUNC_TRACE();
284
285         fman_if_stats_reset(dpaa_intf->fif);
286 }
287
288 static int
289 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
290                     unsigned int n)
291 {
292         struct dpaa_if *dpaa_intf = dev->data->dev_private;
293         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
294         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
295
296         if (xstats == NULL)
297                 return 0;
298
299         if (n < num)
300                 return num;
301
302         fman_if_stats_get_all(dpaa_intf->fif, values,
303                               sizeof(struct dpaa_if_stats) / 8);
304
305         for (i = 0; i < num; i++) {
306                 xstats[i].id = i;
307                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
308         }
309         return i;
310 }
311
312 static int
313 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
314                       struct rte_eth_xstat_name *xstats_names,
315                       __rte_unused unsigned int limit)
316 {
317         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
318
319         if (xstats_names != NULL)
320                 for (i = 0; i < stat_cnt; i++)
321                         snprintf(xstats_names[i].name,
322                                  sizeof(xstats_names[i].name),
323                                  "%s",
324                                  dpaa_xstats_strings[i].name);
325
326         return stat_cnt;
327 }
328
329 static int
330 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
331                       uint64_t *values, unsigned int n)
332 {
333         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
334         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
335
336         if (!ids) {
337                 struct dpaa_if *dpaa_intf = dev->data->dev_private;
338
339                 if (n < stat_cnt)
340                         return stat_cnt;
341
342                 if (!values)
343                         return 0;
344
345                 fman_if_stats_get_all(dpaa_intf->fif, values_copy,
346                                       sizeof(struct dpaa_if_stats) / 8);
347
348                 for (i = 0; i < stat_cnt; i++)
349                         values[i] =
350                                 values_copy[dpaa_xstats_strings[i].offset / 8];
351
352                 return stat_cnt;
353         }
354
355         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
356
357         for (i = 0; i < n; i++) {
358                 if (ids[i] >= stat_cnt) {
359                         DPAA_PMD_ERR("id value isn't valid");
360                         return -1;
361                 }
362                 values[i] = values_copy[ids[i]];
363         }
364         return n;
365 }
366
367 static int
368 dpaa_xstats_get_names_by_id(
369         struct rte_eth_dev *dev,
370         struct rte_eth_xstat_name *xstats_names,
371         const uint64_t *ids,
372         unsigned int limit)
373 {
374         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
375         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
376
377         if (!ids)
378                 return dpaa_xstats_get_names(dev, xstats_names, limit);
379
380         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
381
382         for (i = 0; i < limit; i++) {
383                 if (ids[i] >= stat_cnt) {
384                         DPAA_PMD_ERR("id value isn't valid");
385                         return -1;
386                 }
387                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
388         }
389         return limit;
390 }
391
392 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
393 {
394         struct dpaa_if *dpaa_intf = dev->data->dev_private;
395
396         PMD_INIT_FUNC_TRACE();
397
398         fman_if_promiscuous_enable(dpaa_intf->fif);
399 }
400
401 static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
402 {
403         struct dpaa_if *dpaa_intf = dev->data->dev_private;
404
405         PMD_INIT_FUNC_TRACE();
406
407         fman_if_promiscuous_disable(dpaa_intf->fif);
408 }
409
410 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
411 {
412         struct dpaa_if *dpaa_intf = dev->data->dev_private;
413
414         PMD_INIT_FUNC_TRACE();
415
416         fman_if_set_mcast_filter_table(dpaa_intf->fif);
417 }
418
419 static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
420 {
421         struct dpaa_if *dpaa_intf = dev->data->dev_private;
422
423         PMD_INIT_FUNC_TRACE();
424
425         fman_if_reset_mcast_filter_table(dpaa_intf->fif);
426 }
427
428 static
429 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
430                             uint16_t nb_desc,
431                             unsigned int socket_id __rte_unused,
432                             const struct rte_eth_rxconf *rx_conf __rte_unused,
433                             struct rte_mempool *mp)
434 {
435         struct dpaa_if *dpaa_intf = dev->data->dev_private;
436         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
437
438         PMD_INIT_FUNC_TRACE();
439
440         DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
441
442         if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
443                 struct fman_if_ic_params icp;
444                 uint32_t fd_offset;
445                 uint32_t bp_size;
446
447                 if (!mp->pool_data) {
448                         DPAA_PMD_ERR("Not an offloaded buffer pool!");
449                         return -1;
450                 }
451                 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
452
453                 memset(&icp, 0, sizeof(icp));
454                 /* set ICEOF to the default value, which is 0 */
455                 icp.iciof = DEFAULT_ICIOF;
456                 icp.iceof = DEFAULT_RX_ICEOF;
457                 icp.icsz = DEFAULT_ICSZ;
458                 fman_if_set_ic_params(dpaa_intf->fif, &icp);
459
460                 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
461                 fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
462
463                 /* Buffer pool size should be equal to the dataroom size */
464                 bp_size = rte_pktmbuf_data_room_size(mp);
465                 fman_if_set_bp(dpaa_intf->fif, mp->size,
466                                dpaa_intf->bp_info->bpid, bp_size);
467                 dpaa_intf->valid = 1;
468                 DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
469                             dpaa_intf->name, fd_offset,
470                         fman_if_get_fdoff(dpaa_intf->fif));
471         }
472
473         dev->data->rx_queues[queue_idx] = rxq;
474
475         /* configure the CGR size as per the desc size */
476         if (dpaa_intf->cgr_rx) {
477                 struct qm_mcc_initcgr cgr_opts = {0};
478                 int ret;
479
480                 /* Enable tail drop with cgr on this queue */
481                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
482                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
483                 if (ret) {
484                         DPAA_PMD_WARN(
485                                 "rx taildrop modify fail on fqid %d (ret=%d)",
486                                 rxq->fqid, ret);
487                 }
488         }
489
490         return 0;
491 }
492
493 static
494 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
495 {
496         PMD_INIT_FUNC_TRACE();
497 }
498
499 static
500 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
501                             uint16_t nb_desc __rte_unused,
502                 unsigned int socket_id __rte_unused,
503                 const struct rte_eth_txconf *tx_conf __rte_unused)
504 {
505         struct dpaa_if *dpaa_intf = dev->data->dev_private;
506
507         PMD_INIT_FUNC_TRACE();
508
509         DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
510         dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
511         return 0;
512 }
513
514 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
515 {
516         PMD_INIT_FUNC_TRACE();
517 }
518
519 static uint32_t
520 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
521 {
522         struct dpaa_if *dpaa_intf = dev->data->dev_private;
523         struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
524         u32 frm_cnt = 0;
525
526         PMD_INIT_FUNC_TRACE();
527
528         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
529                 RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
530                         rx_queue_id, frm_cnt);
531         }
532         return frm_cnt;
533 }
534
535 static int dpaa_link_down(struct rte_eth_dev *dev)
536 {
537         PMD_INIT_FUNC_TRACE();
538
539         dpaa_eth_dev_stop(dev);
540         return 0;
541 }
542
543 static int dpaa_link_up(struct rte_eth_dev *dev)
544 {
545         PMD_INIT_FUNC_TRACE();
546
547         dpaa_eth_dev_start(dev);
548         return 0;
549 }
550
551 static int
552 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
553                    struct rte_eth_fc_conf *fc_conf)
554 {
555         struct dpaa_if *dpaa_intf = dev->data->dev_private;
556         struct rte_eth_fc_conf *net_fc;
557
558         PMD_INIT_FUNC_TRACE();
559
560         if (!(dpaa_intf->fc_conf)) {
561                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
562                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
563                 if (!dpaa_intf->fc_conf) {
564                         DPAA_PMD_ERR("unable to save flow control info");
565                         return -ENOMEM;
566                 }
567         }
568         net_fc = dpaa_intf->fc_conf;
569
570         if (fc_conf->high_water < fc_conf->low_water) {
571                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
572                 return -EINVAL;
573         }
574
575         if (fc_conf->mode == RTE_FC_NONE) {
576                 return 0;
577         } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
578                  fc_conf->mode == RTE_FC_FULL) {
579                 fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
580                                          fc_conf->low_water,
581                                 dpaa_intf->bp_info->bpid);
582                 if (fc_conf->pause_time)
583                         fman_if_set_fc_quanta(dpaa_intf->fif,
584                                               fc_conf->pause_time);
585         }
586
587         /* Save the information in dpaa device */
588         net_fc->pause_time = fc_conf->pause_time;
589         net_fc->high_water = fc_conf->high_water;
590         net_fc->low_water = fc_conf->low_water;
591         net_fc->send_xon = fc_conf->send_xon;
592         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
593         net_fc->mode = fc_conf->mode;
594         net_fc->autoneg = fc_conf->autoneg;
595
596         return 0;
597 }
598
599 static int
600 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
601                    struct rte_eth_fc_conf *fc_conf)
602 {
603         struct dpaa_if *dpaa_intf = dev->data->dev_private;
604         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
605         int ret;
606
607         PMD_INIT_FUNC_TRACE();
608
609         if (net_fc) {
610                 fc_conf->pause_time = net_fc->pause_time;
611                 fc_conf->high_water = net_fc->high_water;
612                 fc_conf->low_water = net_fc->low_water;
613                 fc_conf->send_xon = net_fc->send_xon;
614                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
615                 fc_conf->mode = net_fc->mode;
616                 fc_conf->autoneg = net_fc->autoneg;
617                 return 0;
618         }
619         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
620         if (ret) {
621                 fc_conf->mode = RTE_FC_TX_PAUSE;
622                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
623         } else {
624                 fc_conf->mode = RTE_FC_NONE;
625         }
626
627         return 0;
628 }
629
630 static int
631 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
632                              struct ether_addr *addr,
633                              uint32_t index,
634                              __rte_unused uint32_t pool)
635 {
636         int ret;
637         struct dpaa_if *dpaa_intf = dev->data->dev_private;
638
639         PMD_INIT_FUNC_TRACE();
640
641         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
642
643         if (ret)
644                 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
645                         " err = %d\n", ret);
646         return 0;
647 }
648
649 static void
650 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
651                           uint32_t index)
652 {
653         struct dpaa_if *dpaa_intf = dev->data->dev_private;
654
655         PMD_INIT_FUNC_TRACE();
656
657         fman_if_clear_mac_addr(dpaa_intf->fif, index);
658 }
659
660 static void
661 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
662                        struct ether_addr *addr)
663 {
664         int ret;
665         struct dpaa_if *dpaa_intf = dev->data->dev_private;
666
667         PMD_INIT_FUNC_TRACE();
668
669         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
670         if (ret)
671                 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d\n", ret);
672 }
673
674 static struct eth_dev_ops dpaa_devops = {
675         .dev_configure            = dpaa_eth_dev_configure,
676         .dev_start                = dpaa_eth_dev_start,
677         .dev_stop                 = dpaa_eth_dev_stop,
678         .dev_close                = dpaa_eth_dev_close,
679         .dev_infos_get            = dpaa_eth_dev_info,
680         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
681
682         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
683         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
684         .rx_queue_release         = dpaa_eth_rx_queue_release,
685         .tx_queue_release         = dpaa_eth_tx_queue_release,
686         .rx_queue_count           = dpaa_dev_rx_queue_count,
687
688         .flow_ctrl_get            = dpaa_flow_ctrl_get,
689         .flow_ctrl_set            = dpaa_flow_ctrl_set,
690
691         .link_update              = dpaa_eth_link_update,
692         .stats_get                = dpaa_eth_stats_get,
693         .xstats_get               = dpaa_dev_xstats_get,
694         .xstats_get_by_id         = dpaa_xstats_get_by_id,
695         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
696         .xstats_get_names         = dpaa_xstats_get_names,
697         .xstats_reset             = dpaa_eth_stats_reset,
698         .stats_reset              = dpaa_eth_stats_reset,
699         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
700         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
701         .allmulticast_enable      = dpaa_eth_multicast_enable,
702         .allmulticast_disable     = dpaa_eth_multicast_disable,
703         .mtu_set                  = dpaa_mtu_set,
704         .dev_set_link_down        = dpaa_link_down,
705         .dev_set_link_up          = dpaa_link_up,
706         .mac_addr_add             = dpaa_dev_add_mac_addr,
707         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
708         .mac_addr_set             = dpaa_dev_set_mac_addr,
709
710         .fw_version_get           = dpaa_fw_version_get,
711 };
712
713 static bool
714 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
715 {
716         if (strcmp(dev->device->driver->name,
717                    drv->driver.name))
718                 return false;
719
720         return true;
721 }
722
723 static bool
724 is_dpaa_supported(struct rte_eth_dev *dev)
725 {
726         return is_device_supported(dev, &rte_dpaa_pmd);
727 }
728
729 int
730 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
731 {
732         struct rte_eth_dev *dev;
733         struct dpaa_if *dpaa_intf;
734
735         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
736
737         dev = &rte_eth_devices[port];
738
739         if (!is_dpaa_supported(dev))
740                 return -ENOTSUP;
741
742         dpaa_intf = dev->data->dev_private;
743
744         if (on)
745                 fman_if_loopback_enable(dpaa_intf->fif);
746         else
747                 fman_if_loopback_disable(dpaa_intf->fif);
748
749         return 0;
750 }
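/*
 * Illustrative usage sketch (not part of the driver): an application built
 * against rte_pmd_dpaa.h can toggle MAC loopback on a DPAA port, e.g.
 *
 *     if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) == 0)
 *             printf("loopback enabled on port %u\n", port_id);
 *
 * where port_id is assumed to be a valid DPAA ethdev port id; the call
 * returns -ENODEV for an invalid port and -ENOTSUP for a non-DPAA port.
 */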
751
752 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
753 {
754         struct rte_eth_fc_conf *fc_conf;
755         int ret;
756
757         PMD_INIT_FUNC_TRACE();
758
759         if (!(dpaa_intf->fc_conf)) {
760                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
761                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
762                 if (!dpaa_intf->fc_conf) {
763                         DPAA_PMD_ERR("unable to save flow control info");
764                         return -ENOMEM;
765                 }
766         }
767         fc_conf = dpaa_intf->fc_conf;
768         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
769         if (ret) {
770                 fc_conf->mode = RTE_FC_TX_PAUSE;
771                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
772         } else {
773                 fc_conf->mode = RTE_FC_NONE;
774         }
775
776         return 0;
777 }
778
779 /* Initialise an Rx FQ */
780 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
781                               uint32_t fqid)
782 {
783         struct qm_mcc_initfq opts = {0};
784         int ret;
785         u32 flags = 0;
786         struct qm_mcc_initcgr cgr_opts = {
787                 .we_mask = QM_CGR_WE_CS_THRES |
788                                 QM_CGR_WE_CSTD_EN |
789                                 QM_CGR_WE_MODE,
790                 .cgr = {
791                         .cstd_en = QM_CGR_EN,
792                         .mode = QMAN_CGR_MODE_FRAME
793                 }
794         };
795
796         PMD_INIT_FUNC_TRACE();
797
798         ret = qman_reserve_fqid(fqid);
799         if (ret) {
800                 DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
801                              fqid, ret);
802                 return -EINVAL;
803         }
804
805         DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
806         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
807         if (ret) {
808                 DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
809                         fqid, ret);
810                 return ret;
811         }
812
813         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
814                        QM_INITFQ_WE_CONTEXTA;
815
816         opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
817         opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
818                            QM_FQCTRL_PREFERINCACHE;
819         opts.fqd.context_a.stashing.exclusive = 0;
820         opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
821         opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
822         opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
823
824         if (cgr_rx) {
825                 /* Enable tail drop with cgr on this queue */
826                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
827                 cgr_rx->cb = NULL;
828                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
829                                       &cgr_opts);
830                 if (ret) {
831                         DPAA_PMD_WARN(
832                                 "rx taildrop init fail on rx fqid %d (ret=%d)",
833                                 fqid, ret);
834                         goto without_cgr;
835                 }
836                 opts.we_mask |= QM_INITFQ_WE_CGID;
837                 opts.fqd.cgid = cgr_rx->cgrid;
838                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
839         }
840 without_cgr:
841         ret = qman_init_fq(fq, flags, &opts);
842         if (ret)
843                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
844         return ret;
845 }
846
847 /* Initialise a Tx FQ */
848 static int dpaa_tx_queue_init(struct qman_fq *fq,
849                               struct fman_if *fman_intf)
850 {
851         struct qm_mcc_initfq opts = {0};
852         int ret;
853
854         PMD_INIT_FUNC_TRACE();
855
856         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
857                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
858         if (ret) {
859                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
860                 return ret;
861         }
862         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
863                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
864         opts.fqd.dest.channel = fman_intf->tx_channel_id;
865         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
866         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
867         opts.fqd.context_b = 0;
868         /* no tx-confirmation */
869         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
870         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
871         DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
872         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
873         if (ret)
874                 DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
875         return ret;
876 }
877
878 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
879 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
880 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
881 {
882         struct qm_mcc_initfq opts = {0};
883         int ret;
884
885         PMD_INIT_FUNC_TRACE();
886
887         ret = qman_reserve_fqid(fqid);
888         if (ret) {
889                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
890                         fqid, ret);
891                 return -EINVAL;
892         }
893         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
894         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
895         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
896         if (ret) {
897                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
898                         fqid, ret);
899                 return ret;
900         }
901         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
902         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
903         ret = qman_init_fq(fq, 0, &opts);
904         if (ret)
905                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
906                             fqid, ret);
907         return ret;
908 }
909 #endif
910
911 /* Initialise a network interface */
912 static int
913 dpaa_dev_init(struct rte_eth_dev *eth_dev)
914 {
915         int num_cores, num_rx_fqs, fqid;
916         int loop, ret = 0;
917         int dev_id;
918         struct rte_dpaa_device *dpaa_device;
919         struct dpaa_if *dpaa_intf;
920         struct fm_eth_port_cfg *cfg;
921         struct fman_if *fman_intf;
922         struct fman_if_bpool *bp, *tmp_bp;
923         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
924
925         PMD_INIT_FUNC_TRACE();
926
927         /* For secondary processes, the primary has done all the work */
928         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
929                 return 0;
930
931         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
932         dev_id = dpaa_device->id.dev_id;
933         dpaa_intf = eth_dev->data->dev_private;
934         cfg = &dpaa_netcfg->port_cfg[dev_id];
935         fman_intf = cfg->fman_if;
936
937         dpaa_intf->name = dpaa_device->name;
938
939         /* save fman_if & cfg in the interface structure */
940         dpaa_intf->fif = fman_intf;
941         dpaa_intf->ifid = dev_id;
942         dpaa_intf->cfg = cfg;
943
944         /* Initialize Rx FQ's */
945         if (getenv("DPAA_NUM_RX_QUEUES"))
946                 num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
947         else
948                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
949
950         /* Each device cannot have more than DPAA_PCD_FQID_MULTIPLIER RX
951          * queues.
952          */
953         if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
954                 DPAA_PMD_ERR("Invalid number of RX queues\n");
955                 return -EINVAL;
956         }
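        /*
         * Illustrative: the per-port Rx queue count can be overridden at run
         * time by exporting DPAA_NUM_RX_QUEUES in the environment (e.g.
         * DPAA_NUM_RX_QUEUES=4, an assumed example value) before launching
         * the application.
         */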
957
958         dpaa_intf->rx_queues = rte_zmalloc(NULL,
959                 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
960
961         /* If congestion control is enabled globally */
962         if (td_threshold) {
963                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
964                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
965
966                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
967                 if (ret != num_rx_fqs) {
968                         DPAA_PMD_WARN("insufficient CGRIDs available");
969                         return -EINVAL;
970                 }
971         } else {
972                 dpaa_intf->cgr_rx = NULL;
973         }
974
975         for (loop = 0; loop < num_rx_fqs; loop++) {
976                 fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
977                         DPAA_PCD_FQID_MULTIPLIER + loop;
978
979                 if (dpaa_intf->cgr_rx)
980                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
981
982                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
983                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
984                         fqid);
985                 if (ret)
986                         return ret;
987                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
988         }
989         dpaa_intf->nb_rx_queues = num_rx_fqs;
990
991         /* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
992         num_cores = rte_lcore_count();
993         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
994                 num_cores, MAX_CACHELINE);
995         if (!dpaa_intf->tx_queues)
996                 return -ENOMEM;
997
998         for (loop = 0; loop < num_cores; loop++) {
999                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
1000                                          fman_intf);
1001                 if (ret)
1002                         return ret;
1003                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
1004         }
1005         dpaa_intf->nb_tx_queues = num_cores;
1006
1007 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1008         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1009                 DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
1010         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
1011         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1012                 DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
1013         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
1014 #endif
1015
1016         DPAA_PMD_DEBUG("All frame queues created");
1017
1018         /* Get the initial configuration for flow control */
1019         dpaa_fc_set_default(dpaa_intf);
1020
1021         /* reset bpool list, initialize bpool dynamically */
1022         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
1023                 list_del(&bp->node);
1024                 free(bp);
1025         }
1026
1027         /* Populate ethdev structure */
1028         eth_dev->dev_ops = &dpaa_devops;
1029         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1030         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
1031
1032         /* Allocate memory for storing MAC addresses */
1033         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
1034                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
1035         if (eth_dev->data->mac_addrs == NULL) {
1036                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
1037                                                 "store MAC addresses",
1038                                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
1039                 rte_free(dpaa_intf->cgr_rx);
1040                 rte_free(dpaa_intf->rx_queues);
1041                 rte_free(dpaa_intf->tx_queues);
1042                 dpaa_intf->rx_queues = NULL;
1043                 dpaa_intf->tx_queues = NULL;
1044                 dpaa_intf->nb_rx_queues = 0;
1045                 dpaa_intf->nb_tx_queues = 0;
1046                 return -ENOMEM;
1047         }
1048
1049         /* copy the primary mac address */
1050         ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
1051
1052         RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1053                 dpaa_device->name,
1054                 fman_intf->mac_addr.addr_bytes[0],
1055                 fman_intf->mac_addr.addr_bytes[1],
1056                 fman_intf->mac_addr.addr_bytes[2],
1057                 fman_intf->mac_addr.addr_bytes[3],
1058                 fman_intf->mac_addr.addr_bytes[4],
1059                 fman_intf->mac_addr.addr_bytes[5]);
1060
1061         /* Disable RX mode */
1062         fman_if_discard_rx_errors(fman_intf);
1063         fman_if_disable_rx(fman_intf);
1064         /* Disable promiscuous mode */
1065         fman_if_promiscuous_disable(fman_intf);
1066         /* Disable multicast */
1067         fman_if_reset_mcast_filter_table(fman_intf);
1068         /* Reset interface statistics */
1069         fman_if_stats_reset(fman_intf);
1070
1071         return 0;
1072 }
1073
1074 static int
1075 dpaa_dev_uninit(struct rte_eth_dev *dev)
1076 {
1077         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1078         int loop;
1079
1080         PMD_INIT_FUNC_TRACE();
1081
1082         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1083                 return -EPERM;
1084
1085         if (!dpaa_intf) {
1086                 DPAA_PMD_WARN("Already closed or not started");
1087                 return -1;
1088         }
1089
1090         dpaa_eth_dev_close(dev);
1091
1092         /* release configuration memory */
1093         if (dpaa_intf->fc_conf)
1094                 rte_free(dpaa_intf->fc_conf);
1095
1096         /* Release RX congestion Groups */
1097         if (dpaa_intf->cgr_rx) {
1098                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
1099                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
1100
1101                 qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
1102                                          dpaa_intf->nb_rx_queues);
1103         }
1104
1105         rte_free(dpaa_intf->cgr_rx);
1106         dpaa_intf->cgr_rx = NULL;
1107
1108         rte_free(dpaa_intf->rx_queues);
1109         dpaa_intf->rx_queues = NULL;
1110
1111         rte_free(dpaa_intf->tx_queues);
1112         dpaa_intf->tx_queues = NULL;
1113
1114         /* free memory for storing MAC addresses */
1115         rte_free(dev->data->mac_addrs);
1116         dev->data->mac_addrs = NULL;
1117
1118         dev->dev_ops = NULL;
1119         dev->rx_pkt_burst = NULL;
1120         dev->tx_pkt_burst = NULL;
1121
1122         return 0;
1123 }
1124
1125 static int
1126 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
1127                struct rte_dpaa_device *dpaa_dev)
1128 {
1129         int diag;
1130         int ret;
1131         struct rte_eth_dev *eth_dev;
1132
1133         PMD_INIT_FUNC_TRACE();
1134
1135         /* In case of secondary process, the device is already configured
1136          * and no further action is required, except portal initialization
1137          * and verifying secondary attachment to port name.
1138          */
1139         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1140                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
1141                 if (!eth_dev)
1142                         return -ENOMEM;
1143                 return 0;
1144         }
1145
1146         if (!is_global_init) {
1147                 /* One time load of Qman/Bman drivers */
1148                 ret = qman_global_init();
1149                 if (ret) {
1150                         DPAA_PMD_ERR("QMAN initialization failed: %d",
1151                                      ret);
1152                         return ret;
1153                 }
1154                 ret = bman_global_init();
1155                 if (ret) {
1156                         DPAA_PMD_ERR("BMAN initialization failed: %d",
1157                                      ret);
1158                         return ret;
1159                 }
1160
1161                 is_global_init = 1;
1162         }
1163
1164         ret = rte_dpaa_portal_init((void *)1);
1165         if (ret) {
1166                 DPAA_PMD_ERR("Unable to initialize portal");
1167                 return ret;
1168         }
1169
1170         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
1171         if (eth_dev == NULL)
1172                 return -ENOMEM;
1173
1174         eth_dev->data->dev_private = rte_zmalloc(
1175                                         "ethdev private structure",
1176                                         sizeof(struct dpaa_if),
1177                                         RTE_CACHE_LINE_SIZE);
1178         if (!eth_dev->data->dev_private) {
1179                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
1180                 rte_eth_dev_release_port(eth_dev);
1181                 return -ENOMEM;
1182         }
1183
1184         eth_dev->device = &dpaa_dev->device;
1185         eth_dev->device->driver = &dpaa_drv->driver;
1186         dpaa_dev->eth_dev = eth_dev;
1187
1188         /* Invoke PMD device initialization function */
1189         diag = dpaa_dev_init(eth_dev);
1190         if (diag == 0)
1191                 return 0;
1192
1193         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1194                 rte_free(eth_dev->data->dev_private);
1195
1196         rte_eth_dev_release_port(eth_dev);
1197         return diag;
1198 }
1199
1200 static int
1201 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
1202 {
1203         struct rte_eth_dev *eth_dev;
1204
1205         PMD_INIT_FUNC_TRACE();
1206
1207         eth_dev = dpaa_dev->eth_dev;
1208         dpaa_dev_uninit(eth_dev);
1209
1210         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1211                 rte_free(eth_dev->data->dev_private);
1212
1213         rte_eth_dev_release_port(eth_dev);
1214
1215         return 0;
1216 }
1217
1218 static struct rte_dpaa_driver rte_dpaa_pmd = {
1219         .drv_type = FSL_DPAA_ETH,
1220         .probe = rte_dpaa_probe,
1221         .remove = rte_dpaa_remove,
1222 };
1223
1224 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);