net/dpaa: check status before configuring shared MAC
dpdk.git: drivers/net/dpaa/dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2020 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_string_fns.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_interrupts.h>
22 #include <rte_log.h>
23 #include <rte_debug.h>
24 #include <rte_pci.h>
25 #include <rte_atomic.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_memory.h>
28 #include <rte_tailq.h>
29 #include <rte_eal.h>
30 #include <rte_alarm.h>
31 #include <rte_ether.h>
32 #include <ethdev_driver.h>
33 #include <rte_malloc.h>
34 #include <rte_ring.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38 #include <dpaa_mempool.h>
39
40 #include <dpaa_ethdev.h>
41 #include <dpaa_rxtx.h>
42 #include <dpaa_flow.h>
43 #include <rte_pmd_dpaa.h>
44
45 #include <fsl_usd.h>
46 #include <fsl_qman.h>
47 #include <fsl_bman.h>
48 #include <fsl_fman.h>
49 #include <process.h>
50 #include <fmlib/fm_ext.h>
51
52 #define CHECK_INTERVAL         100  /* 100ms */
53 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
54
55 /* Supported Rx offloads */
56 static uint64_t dev_rx_offloads_sup =
57                 RTE_ETH_RX_OFFLOAD_SCATTER;
58
59 /* Rx offloads which cannot be disabled */
60 static uint64_t dev_rx_offloads_nodis =
61                 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
62                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
63                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
64                 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
65                 RTE_ETH_RX_OFFLOAD_RSS_HASH;
66
67 /* Supported Tx offloads */
68 static uint64_t dev_tx_offloads_sup =
69                 RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
70                 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
71
72 /* Tx offloads which cannot be disabled */
73 static uint64_t dev_tx_offloads_nodis =
74                 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
75                 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
76                 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
77                 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
78                 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
79                 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
80
81 /* Keep track of whether QMAN and BMAN have been globally initialized */
82 static int is_global_init;
83 static int fmc_q = 1;   /* Indicates the use of static fmc for distribution */
84 static int default_q;   /* use default queue - FMC is not executed */
85 /* At present we only allow up to 4 push mode queues by default, as each of
86  * these queues needs a dedicated portal and we are short of portals.
87  */
88 #define DPAA_MAX_PUSH_MODE_QUEUE       8
89 #define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
90
91 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
92 static int dpaa_push_queue_idx; /* Number of queues currently in push mode */
93
94
95 /* Per RX FQ Taildrop in frame count */
96 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
97
98 /* Per TX FQ Taildrop in frame count, disabled by default */
99 static unsigned int td_tx_threshold;
100
101 struct rte_dpaa_xstats_name_off {
102         char name[RTE_ETH_XSTATS_NAME_SIZE];
103         uint32_t offset;
104 };
105
106 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
107         {"rx_align_err",
108                 offsetof(struct dpaa_if_stats, raln)},
109         {"rx_valid_pause",
110                 offsetof(struct dpaa_if_stats, rxpf)},
111         {"rx_fcs_err",
112                 offsetof(struct dpaa_if_stats, rfcs)},
113         {"rx_vlan_frame",
114                 offsetof(struct dpaa_if_stats, rvlan)},
115         {"rx_frame_err",
116                 offsetof(struct dpaa_if_stats, rerr)},
117         {"rx_drop_err",
118                 offsetof(struct dpaa_if_stats, rdrp)},
119         {"rx_undersized",
120                 offsetof(struct dpaa_if_stats, rund)},
121         {"rx_oversize_err",
122                 offsetof(struct dpaa_if_stats, rovr)},
123         {"rx_fragment_pkt",
124                 offsetof(struct dpaa_if_stats, rfrg)},
125         {"tx_valid_pause",
126                 offsetof(struct dpaa_if_stats, txpf)},
127         {"tx_fcs_err",
128                 offsetof(struct dpaa_if_stats, terr)},
129         {"tx_vlan_frame",
130                 offsetof(struct dpaa_if_stats, tvlan)},
131         {"tx_undersized",
132                 offsetof(struct dpaa_if_stats, tund)},
133 };
134
135 static struct rte_dpaa_driver rte_dpaa_pmd;
136
137 static int
138 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
139
140 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
141                                 int wait_to_complete __rte_unused);
142
143 static void dpaa_interrupt_handler(void *param);
144
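/* Default INITFQ options for poll-mode Rx queues: avoid-block, prefer-in-cache
 * and context-A stashing. Annotation stashing is skipped on LS1046A, where
 * stashing becomes a bottleneck in multi-core scenarios.
 */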
145 static inline void
146 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
147 {
148         memset(opts, 0, sizeof(struct qm_mcc_initfq));
149         opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
150         opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
151                            QM_FQCTRL_PREFERINCACHE;
152         opts->fqd.context_a.stashing.exclusive = 0;
153         if (dpaa_svr_family != SVR_LS1046A_FAMILY)
154                 opts->fqd.context_a.stashing.annotation_cl =
155                                                 DPAA_IF_RX_ANNOTATION_STASH;
156         opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
157         opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
158 }
159
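/* Validate that a frame of the requested MTU fits either a single Rx buffer
 * or, when scattered Rx is enabled, a maximum-length SG list, then program the
 * resulting maximum frame length on the FMAN interface.
 */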
160 static int
161 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
162 {
163         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
164                                 + VLAN_TAG_SIZE;
165         uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
166
167         PMD_INIT_FUNC_TRACE();
168
169         /*
170          * Refuse mtu that requires the support of scattered packets
171          * when this feature has not been enabled before.
172          */
173         if (dev->data->min_rx_buf_size &&
174                 !dev->data->scattered_rx && frame_size > buffsz) {
175                 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
176                 return -EINVAL;
177         }
178
179         /* check <seg size> * <max_seg>  >= max_frame */
180         if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
181                 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
182                 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
183                                 buffsz * DPAA_SGT_MAX_ENTRIES);
184                 return -EINVAL;
185         }
186
187         fman_if_set_maxfrm(dev->process_private, frame_size);
188
189         return 0;
190 }
191
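/* Device configure: verify that a shared MAC interface is enabled in the
 * kernel, report offloads that stay enabled by default, program the maximum
 * frame length and optional scatter mode, apply the FMAN port configuration
 * when FMC is not used, register the link-status interrupt handler, and
 * either restart autonegotiation or apply the requested fixed speed/duplex
 * while the link is up.
 */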
192 static int
193 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
194 {
195         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
196         uint64_t rx_offloads = eth_conf->rxmode.offloads;
197         uint64_t tx_offloads = eth_conf->txmode.offloads;
198         struct dpaa_if *dpaa_intf = dev->data->dev_private;
199         struct rte_device *rdev = dev->device;
200         struct rte_eth_link *link = &dev->data->dev_link;
201         struct rte_dpaa_device *dpaa_dev;
202         struct fman_if *fif = dev->process_private;
203         struct __fman_if *__fif;
204         struct rte_intr_handle *intr_handle;
205         uint32_t max_rx_pktlen;
206         int speed, duplex;
207         int ret, rx_status;
208
209         PMD_INIT_FUNC_TRACE();
210
211         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
212         intr_handle = dpaa_dev->intr_handle;
213         __fif = container_of(fif, struct __fman_if, __if);
214
215         /* Check if interface is enabled in case of shared MAC */
216         if (fif->is_shared_mac) {
217                 rx_status = fman_if_get_rx_status(fif);
218                 if (!rx_status) {
219                         DPAA_PMD_ERR("%s Interface not enabled in kernel!",
220                                      dpaa_intf->name);
221                         return -EHOSTDOWN;
222                 }
223         }
224
225         /* Rx offloads which are enabled by default */
226         if (dev_rx_offloads_nodis & ~rx_offloads) {
227                 DPAA_PMD_INFO(
228                 "Some of rx offloads enabled by default - requested 0x%" PRIx64
229                 " fixed are 0x%" PRIx64,
230                 rx_offloads, dev_rx_offloads_nodis);
231         }
232
233         /* Tx offloads which are enabled by default */
234         if (dev_tx_offloads_nodis & ~tx_offloads) {
235                 DPAA_PMD_INFO(
236                 "Some of tx offloads enabled by default - requested 0x%" PRIx64
237                 " fixed are 0x%" PRIx64,
238                 tx_offloads, dev_tx_offloads_nodis);
239         }
240
241         max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
242                         RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
243         if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
244                 DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
245                         "supported is %d",
246                         max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
247                 max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
248         }
249
250         fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
251
252         if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
253                 DPAA_PMD_DEBUG("enabling scatter mode");
254                 fman_if_set_sg(dev->process_private, 1);
255                 dev->data->scattered_rx = 1;
256         }
257
258         if (!(default_q || fmc_q)) {
259                 if (dpaa_fm_config(dev,
260                         eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
261                         dpaa_write_fm_config_to_file();
262                         DPAA_PMD_ERR("FM port configuration: Failed\n");
263                         return -1;
264                 }
265                 dpaa_write_fm_config_to_file();
266         }
267
268         /* if the interrupts were configured on this device */
269         if (intr_handle && rte_intr_fd_get(intr_handle)) {
270                 if (dev->data->dev_conf.intr_conf.lsc != 0)
271                         rte_intr_callback_register(intr_handle,
272                                            dpaa_interrupt_handler,
273                                            (void *)dev);
274
275                 ret = dpaa_intr_enable(__fif->node_name,
276                                        rte_intr_fd_get(intr_handle));
277                 if (ret) {
278                         if (dev->data->dev_conf.intr_conf.lsc != 0) {
279                                 rte_intr_callback_unregister(intr_handle,
280                                         dpaa_interrupt_handler,
281                                         (void *)dev);
282                                 if (ret == EINVAL)
283                                         printf("Failed to enable interrupt: Not Supported\n");
284                                 else
285                                         printf("Failed to enable interrupt\n");
286                         }
287                         dev->data->dev_conf.intr_conf.lsc = 0;
288                         dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
289                 }
290         }
291
292         /* Wait for link status to get updated */
293         if (!link->link_status)
294                 sleep(1);
295
296         /* Configure the link only if it is up */
297         if (link->link_status) {
298                 if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
299                         /* Start autoneg only if link is not in autoneg mode */
300                         if (!link->link_autoneg)
301                                 dpaa_restart_link_autoneg(__fif->node_name);
302                 } else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
303                         switch (eth_conf->link_speeds &  RTE_ETH_LINK_SPEED_FIXED) {
304                         case RTE_ETH_LINK_SPEED_10M_HD:
305                                 speed = RTE_ETH_SPEED_NUM_10M;
306                                 duplex = RTE_ETH_LINK_HALF_DUPLEX;
307                                 break;
308                         case RTE_ETH_LINK_SPEED_10M:
309                                 speed = RTE_ETH_SPEED_NUM_10M;
310                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
311                                 break;
312                         case RTE_ETH_LINK_SPEED_100M_HD:
313                                 speed = RTE_ETH_SPEED_NUM_100M;
314                                 duplex = RTE_ETH_LINK_HALF_DUPLEX;
315                                 break;
316                         case RTE_ETH_LINK_SPEED_100M:
317                                 speed = RTE_ETH_SPEED_NUM_100M;
318                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
319                                 break;
320                         case RTE_ETH_LINK_SPEED_1G:
321                                 speed = RTE_ETH_SPEED_NUM_1G;
322                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
323                                 break;
324                         case RTE_ETH_LINK_SPEED_2_5G:
325                                 speed = RTE_ETH_SPEED_NUM_2_5G;
326                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
327                                 break;
328                         case RTE_ETH_LINK_SPEED_10G:
329                                 speed = RTE_ETH_SPEED_NUM_10G;
330                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
331                                 break;
332                         default:
333                                 speed = RTE_ETH_SPEED_NUM_NONE;
334                                 duplex = RTE_ETH_LINK_FULL_DUPLEX;
335                                 break;
336                         }
337                         /* Set link speed */
338                         dpaa_update_link_speed(__fif->node_name, speed, duplex);
339                 } else {
340                         /* Manual autoneg - custom advertisement speed. */
341                         printf("Custom Advertisement speeds not supported\n");
342                 }
343         }
344
345         return 0;
346 }
347
348 static const uint32_t *
349 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
350 {
351         static const uint32_t ptypes[] = {
352                 RTE_PTYPE_L2_ETHER,
353                 RTE_PTYPE_L2_ETHER_VLAN,
354                 RTE_PTYPE_L2_ETHER_ARP,
355                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
356                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
357                 RTE_PTYPE_L4_ICMP,
358                 RTE_PTYPE_L4_TCP,
359                 RTE_PTYPE_L4_UDP,
360                 RTE_PTYPE_L4_FRAG,
361                 RTE_PTYPE_L4_TCP,
362                 RTE_PTYPE_L4_UDP,
363                 RTE_PTYPE_L4_SCTP
364         };
365
366         PMD_INIT_FUNC_TRACE();
367
368         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
369                 return ptypes;
370         return NULL;
371 }
372
373 static void dpaa_interrupt_handler(void *param)
374 {
375         struct rte_eth_dev *dev = param;
376         struct rte_device *rdev = dev->device;
377         struct rte_dpaa_device *dpaa_dev;
378         struct rte_intr_handle *intr_handle;
379         uint64_t buf;
380         int bytes_read;
381
382         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
383         intr_handle = dpaa_dev->intr_handle;
384
385         if (rte_intr_fd_get(intr_handle) < 0)
386                 return;
387
388         bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
389                           sizeof(uint64_t));
390         if (bytes_read < 0)
391                 DPAA_PMD_ERR("Error reading eventfd\n");
392         dpaa_eth_link_update(dev, 0);
393         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
394 }
395
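/* Device start: write the FM configuration to file when FMC/default queues
 * are not used, select the Tx burst function (the slower CGR-aware path when
 * Tx tail drop is enabled) and enable Rx on the FMAN interface.
 */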
396 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
397 {
398         struct dpaa_if *dpaa_intf = dev->data->dev_private;
399
400         PMD_INIT_FUNC_TRACE();
401
402         if (!(default_q || fmc_q))
403                 dpaa_write_fm_config_to_file();
404
405         /* Change tx callback to the real one */
406         if (dpaa_intf->cgr_tx)
407                 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
408         else
409                 dev->tx_pkt_burst = dpaa_eth_queue_tx;
410
411         fman_if_enable_rx(dev->process_private);
412
413         return 0;
414 }
415
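/* Device stop: disable Rx unless the MAC is shared with the kernel (which
 * still owns the interface) and point the Tx burst function at a drop-all
 * stub.
 */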
416 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
417 {
418         struct fman_if *fif = dev->process_private;
419
420         PMD_INIT_FUNC_TRACE();
421         dev->data->dev_started = 0;
422
423         if (!fif->is_shared_mac)
424                 fman_if_disable_rx(fif);
425         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
426
427         return 0;
428 }
429
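/* Device close (primary process only): undo the FM configuration, stop the
 * port, restore autonegotiation, tear down the link-status interrupt, and
 * release flow-control state, Rx/Tx congestion groups and the queue arrays.
 */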
430 static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
431 {
432         struct fman_if *fif = dev->process_private;
433         struct __fman_if *__fif;
434         struct rte_device *rdev = dev->device;
435         struct rte_dpaa_device *dpaa_dev;
436         struct rte_intr_handle *intr_handle;
437         struct rte_eth_link *link = &dev->data->dev_link;
438         struct dpaa_if *dpaa_intf = dev->data->dev_private;
439         int loop;
440         int ret;
441
442         PMD_INIT_FUNC_TRACE();
443
444         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
445                 return 0;
446
447         if (!dpaa_intf) {
448                 DPAA_PMD_WARN("Already closed or not started");
449                 return -1;
450         }
451
452         /* DPAA FM deconfig */
453         if (!(default_q || fmc_q)) {
454                 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
455                         DPAA_PMD_WARN("DPAA FM deconfig failed\n");
456         }
457
458         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
459         intr_handle = dpaa_dev->intr_handle;
460         __fif = container_of(fif, struct __fman_if, __if);
461
462         ret = dpaa_eth_dev_stop(dev);
463
464         /* Reset link to autoneg */
465         if (link->link_status && !link->link_autoneg)
466                 dpaa_restart_link_autoneg(__fif->node_name);
467
468         if (intr_handle && rte_intr_fd_get(intr_handle) &&
469             dev->data->dev_conf.intr_conf.lsc != 0) {
470                 dpaa_intr_disable(__fif->node_name);
471                 rte_intr_callback_unregister(intr_handle,
472                                              dpaa_interrupt_handler,
473                                              (void *)dev);
474         }
475
476         /* release configuration memory */
477         if (dpaa_intf->fc_conf)
478                 rte_free(dpaa_intf->fc_conf);
479
480         /* Release RX congestion Groups */
481         if (dpaa_intf->cgr_rx) {
482                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
483                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
484         }
485
486         rte_free(dpaa_intf->cgr_rx);
487         dpaa_intf->cgr_rx = NULL;
488         /* Release TX congestion Groups */
489         if (dpaa_intf->cgr_tx) {
490                 for (loop = 0; loop < MAX_DPAA_CORES; loop++)
491                         qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
492                 rte_free(dpaa_intf->cgr_tx);
493                 dpaa_intf->cgr_tx = NULL;
494         }
495
496         rte_free(dpaa_intf->rx_queues);
497         dpaa_intf->rx_queues = NULL;
498
499         rte_free(dpaa_intf->tx_queues);
500         dpaa_intf->tx_queues = NULL;
501
502         return ret;
503 }
504
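/* Report the firmware/SoC version as "SVR:<svr>-fman-v<rev>", with the SVR
 * read from the SoC ID file. Follows the ethdev convention of returning the
 * required buffer size (including the terminating NUL) when fw_size is too
 * small.
 */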
505 static int
506 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
507                      char *fw_version,
508                      size_t fw_size)
509 {
510         int ret;
511         FILE *svr_file = NULL;
512         unsigned int svr_ver = 0;
513
514         PMD_INIT_FUNC_TRACE();
515
516         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
517         if (!svr_file) {
518                 DPAA_PMD_ERR("Unable to open SoC device");
519                 return -ENOTSUP; /* Not supported on this infra */
520         }
521         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
522                 dpaa_svr_family = svr_ver & SVR_MASK;
523         else
524                 DPAA_PMD_ERR("Unable to read SoC device");
525
526         fclose(svr_file);
527
528         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
529                        svr_ver, fman_ip_rev);
530         if (ret < 0)
531                 return -EINVAL;
532
533         ret += 1; /* add the size of '\0' */
534         if (fw_size < (size_t)ret)
535                 return ret;
536         else
537                 return 0;
538 }
539
540 static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
541                              struct rte_eth_dev_info *dev_info)
542 {
543         struct dpaa_if *dpaa_intf = dev->data->dev_private;
544         struct fman_if *fif = dev->process_private;
545
546         DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
547
548         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
549         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
550         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
551         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
552         dev_info->max_hash_mac_addrs = 0;
553         dev_info->max_vfs = 0;
554         dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
555         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
556
557         if (fif->mac_type == fman_mac_1g) {
558                 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
559                                         | RTE_ETH_LINK_SPEED_10M
560                                         | RTE_ETH_LINK_SPEED_100M_HD
561                                         | RTE_ETH_LINK_SPEED_100M
562                                         | RTE_ETH_LINK_SPEED_1G;
563         } else if (fif->mac_type == fman_mac_2_5g) {
564                 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
565                                         | RTE_ETH_LINK_SPEED_10M
566                                         | RTE_ETH_LINK_SPEED_100M_HD
567                                         | RTE_ETH_LINK_SPEED_100M
568                                         | RTE_ETH_LINK_SPEED_1G
569                                         | RTE_ETH_LINK_SPEED_2_5G;
570         } else if (fif->mac_type == fman_mac_10g) {
571                 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
572                                         | RTE_ETH_LINK_SPEED_10M
573                                         | RTE_ETH_LINK_SPEED_100M_HD
574                                         | RTE_ETH_LINK_SPEED_100M
575                                         | RTE_ETH_LINK_SPEED_1G
576                                         | RTE_ETH_LINK_SPEED_2_5G
577                                         | RTE_ETH_LINK_SPEED_10G;
578         } else {
579                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
580                              dpaa_intf->name, fif->mac_type);
581                 return -EINVAL;
582         }
583
584         dev_info->rx_offload_capa = dev_rx_offloads_sup |
585                                         dev_rx_offloads_nodis;
586         dev_info->tx_offload_capa = dev_tx_offloads_sup |
587                                         dev_tx_offloads_nodis;
588         dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
589         dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
590         dev_info->default_rxportconf.nb_queues = 1;
591         dev_info->default_txportconf.nb_queues = 1;
592         dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
593         dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
594
595         return 0;
596 }
597
598 static int
599 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
600                         __rte_unused uint16_t queue_id,
601                         struct rte_eth_burst_mode *mode)
602 {
603         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
604         int ret = -EINVAL;
605         unsigned int i;
606         const struct burst_info {
607                 uint64_t flags;
608                 const char *output;
609         } rx_offload_map[] = {
610                         {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
611                         {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
612                         {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
613                         {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
614                         {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
615                         {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
616         };
617
618         /* Update Rx offload info */
619         for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
620                 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
621                         snprintf(mode->info, sizeof(mode->info), "%s",
622                                 rx_offload_map[i].output);
623                         ret = 0;
624                         break;
625                 }
626         }
627         return ret;
628 }
629
630 static int
631 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
632                         __rte_unused uint16_t queue_id,
633                         struct rte_eth_burst_mode *mode)
634 {
635         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
636         int ret = -EINVAL;
637         unsigned int i;
638         const struct burst_info {
639                 uint64_t flags;
640                 const char *output;
641         } tx_offload_map[] = {
642                         {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
643                         {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
644                         {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
645                         {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
646                         {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
647                         {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
648                         {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
649                         {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
650         };
651
652         /* Update Tx offload info */
653         for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
654                 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
655                         snprintf(mode->info, sizeof(mode->info), "%s",
656                                 tx_offload_map[i].output);
657                         ret = 0;
658                         break;
659                 }
660         }
661         return ret;
662 }
663
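/* Link update: when LSC interrupts are enabled, poll the kernel-provided link
 * status every CHECK_INTERVAL ms for up to MAX_REPEAT_TIME retries while the
 * link is down and wait_to_complete is set; otherwise mirror the interface's
 * valid flag. Older ioctl versions cannot report speed, so it is derived from
 * the MAC type.
 */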
664 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
665                                 int wait_to_complete)
666 {
667         struct dpaa_if *dpaa_intf = dev->data->dev_private;
668         struct rte_eth_link *link = &dev->data->dev_link;
669         struct fman_if *fif = dev->process_private;
670         struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
671         int ret, ioctl_version;
672         uint8_t count;
673
674         PMD_INIT_FUNC_TRACE();
675
676         ioctl_version = dpaa_get_ioctl_version_number();
677
678         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
679                 for (count = 0; count <= MAX_REPEAT_TIME; count++) {
680                         ret = dpaa_get_link_status(__fif->node_name, link);
681                         if (ret)
682                                 return ret;
683                         if (link->link_status == RTE_ETH_LINK_DOWN &&
684                             wait_to_complete)
685                                 rte_delay_ms(CHECK_INTERVAL);
686                         else
687                                 break;
688                 }
689         } else {
690                 link->link_status = dpaa_intf->valid;
691         }
692
693         if (ioctl_version < 2) {
694                 link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
695                 link->link_autoneg = RTE_ETH_LINK_AUTONEG;
696
697                 if (fif->mac_type == fman_mac_1g)
698                         link->link_speed = RTE_ETH_SPEED_NUM_1G;
699                 else if (fif->mac_type == fman_mac_2_5g)
700                         link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
701                 else if (fif->mac_type == fman_mac_10g)
702                         link->link_speed = RTE_ETH_SPEED_NUM_10G;
703                 else
704                         DPAA_PMD_ERR("invalid link_speed: %s, %d",
705                                      dpaa_intf->name, fif->mac_type);
706         }
707
708         DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
709                       link->link_status ? "Up" : "Down");
710         return 0;
711 }
712
713 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
714                                struct rte_eth_stats *stats)
715 {
716         PMD_INIT_FUNC_TRACE();
717
718         fman_if_stats_get(dev->process_private, stats);
719         return 0;
720 }
721
722 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
723 {
724         PMD_INIT_FUNC_TRACE();
725
726         fman_if_stats_reset(dev->process_private);
727
728         return 0;
729 }
730
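/* Extended stats follow the usual two-pass contract: return the number of
 * counters when the caller's array is too small, otherwise fill the values
 * using each counter's byte offset into struct dpaa_if_stats.
 */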
731 static int
732 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
733                     unsigned int n)
734 {
735         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
736         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
737
738         if (n < num)
739                 return num;
740
741         if (xstats == NULL)
742                 return 0;
743
744         fman_if_stats_get_all(dev->process_private, values,
745                               sizeof(struct dpaa_if_stats) / 8);
746
747         for (i = 0; i < num; i++) {
748                 xstats[i].id = i;
749                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
750         }
751         return i;
752 }
753
754 static int
755 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
756                       struct rte_eth_xstat_name *xstats_names,
757                       unsigned int limit)
758 {
759         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
760
761         if (limit < stat_cnt)
762                 return stat_cnt;
763
764         if (xstats_names != NULL)
765                 for (i = 0; i < stat_cnt; i++)
766                         strlcpy(xstats_names[i].name,
767                                 dpaa_xstats_strings[i].name,
768                                 sizeof(xstats_names[i].name));
769
770         return stat_cnt;
771 }
772
773 static int
774 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
775                       uint64_t *values, unsigned int n)
776 {
777         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
778         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
779
780         if (!ids) {
781                 if (n < stat_cnt)
782                         return stat_cnt;
783
784                 if (!values)
785                         return 0;
786
787                 fman_if_stats_get_all(dev->process_private, values_copy,
788                                       sizeof(struct dpaa_if_stats) / 8);
789
790                 for (i = 0; i < stat_cnt; i++)
791                         values[i] =
792                                 values_copy[dpaa_xstats_strings[i].offset / 8];
793
794                 return stat_cnt;
795         }
796
797         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
798
799         for (i = 0; i < n; i++) {
800                 if (ids[i] >= stat_cnt) {
801                         DPAA_PMD_ERR("id value isn't valid");
802                         return -1;
803                 }
804                 values[i] = values_copy[ids[i]];
805         }
806         return n;
807 }
808
809 static int
810 dpaa_xstats_get_names_by_id(
811         struct rte_eth_dev *dev,
812         const uint64_t *ids,
813         struct rte_eth_xstat_name *xstats_names,
814         unsigned int limit)
815 {
816         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
817         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
818
819         if (!ids)
820                 return dpaa_xstats_get_names(dev, xstats_names, limit);
821
822         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
823
824         for (i = 0; i < limit; i++) {
825                 if (ids[i] >= stat_cnt) {
826                         DPAA_PMD_ERR("id value isn't valid");
827                         return -1;
828                 }
829                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
830         }
831         return limit;
832 }
833
834 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
835 {
836         PMD_INIT_FUNC_TRACE();
837
838         fman_if_promiscuous_enable(dev->process_private);
839
840         return 0;
841 }
842
843 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
844 {
845         PMD_INIT_FUNC_TRACE();
846
847         fman_if_promiscuous_disable(dev->process_private);
848
849         return 0;
850 }
851
852 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
853 {
854         PMD_INIT_FUNC_TRACE();
855
856         fman_if_set_mcast_filter_table(dev->process_private);
857
858         return 0;
859 }
860
861 static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
862 {
863         PMD_INIT_FUNC_TRACE();
864
865         fman_if_reset_mcast_filter_table(dev->process_private);
866
867         return 0;
868 }
869
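/* Program the FMAN interface buffer layout: default internal-context params,
 * a frame-descriptor offset of headroom plus the HW reserve, and a buffer
 * pool whose buffer size matches the mempool data room.
 */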
870 static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
871 {
872         struct dpaa_if *dpaa_intf = dev->data->dev_private;
873         struct fman_if_ic_params icp;
874         uint32_t fd_offset;
875         uint32_t bp_size;
876
877         memset(&icp, 0, sizeof(icp));
878         /* set ICIOF/ICEOF/ICSZ to their default values */
879         icp.iciof = DEFAULT_ICIOF;
880         icp.iceof = DEFAULT_RX_ICEOF;
881         icp.icsz = DEFAULT_ICSZ;
882         fman_if_set_ic_params(dev->process_private, &icp);
883
884         fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
885         fman_if_set_fdoff(dev->process_private, fd_offset);
886
887         /* Buffer pool size should be equal to the dataroom size */
888         bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
889
890         fman_if_set_bp(dev->process_private,
891                        dpaa_intf->bp_info->mp->size,
892                        dpaa_intf->bp_info->bpid, bp_size);
893 }
894
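/* With VSP profiles, every Rx queue mapped to the same profile must use the
 * same buffer pool; reject a setup that would bind a second bpid to a profile.
 */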
895 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
896                                              int8_t vsp_id, uint32_t bpid)
897 {
898         struct dpaa_if *dpaa_intf = dev->data->dev_private;
899         struct fman_if *fif = dev->process_private;
900
901         if (fif->num_profiles) {
902                 if (vsp_id < 0)
903                         vsp_id = fif->base_profile_id;
904         } else {
905                 if (vsp_id < 0)
906                         vsp_id = 0;
907         }
908
909         if (dpaa_intf->vsp_bpid[vsp_id] &&
910                 bpid != dpaa_intf->vsp_bpid[vsp_id]) {
911                 DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
912
913                 return -1;
914         }
915
916         return 0;
917 }
918
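/* Rx queue setup: validate the queue index, mempool and maximum frame length
 * against the buffer size (single buffer or SG list), bind the buffer pool or
 * VSP, and, while dedicated portals remain, move the queue into push (static
 * dequeue) mode on its own pool channel with optional CGR-based tail drop
 * sized to nb_desc.
 */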
919 static
920 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
921                             uint16_t nb_desc,
922                             unsigned int socket_id __rte_unused,
923                             const struct rte_eth_rxconf *rx_conf,
924                             struct rte_mempool *mp)
925 {
926         struct dpaa_if *dpaa_intf = dev->data->dev_private;
927         struct fman_if *fif = dev->process_private;
928         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
929         struct qm_mcc_initfq opts = {0};
930         u32 flags = 0;
931         int ret;
932         u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
933         uint32_t max_rx_pktlen;
934
935         PMD_INIT_FUNC_TRACE();
936
937         if (queue_idx >= dev->data->nb_rx_queues) {
938                 rte_errno = EOVERFLOW;
939                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
940                       (void *)dev, queue_idx, dev->data->nb_rx_queues);
941                 return -rte_errno;
942         }
943
944         /* Rx deferred start is not supported */
945         if (rx_conf->rx_deferred_start) {
946                 DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
947                 return -EINVAL;
948         }
949         rxq->nb_desc = UINT16_MAX;
950         rxq->offloads = rx_conf->offloads;
951
952         DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
953                         queue_idx, rxq->fqid);
954
955         if (!fif->num_profiles) {
956                 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
957                         dpaa_intf->bp_info->mp != mp) {
958                         DPAA_PMD_WARN("Multiple pools on same interface not"
959                                       " supported");
960                         return -EINVAL;
961                 }
962         } else {
963                 if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
964                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
965                         return -EINVAL;
966                 }
967         }
968
969         if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
970             dpaa_intf->bp_info->mp != mp) {
971                 DPAA_PMD_WARN("Multiple pools on same interface not supported");
972                 return -EINVAL;
973         }
974
975         max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
976                 VLAN_TAG_SIZE;
977         /* Max packet can fit in single buffer */
978         if (max_rx_pktlen <= buffsz) {
979                 ;
980         } else if (dev->data->dev_conf.rxmode.offloads &
981                         RTE_ETH_RX_OFFLOAD_SCATTER) {
982                 if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
983                         DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
984                                 "MaxSGlist %d",
985                                 max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
986                         rte_errno = EOVERFLOW;
987                         return -rte_errno;
988                 }
989         } else {
990                 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
991                      " larger than a single mbuf (%u) and scattered"
992                      " mode has not been requested",
993                      max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
994         }
995
996         dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
997
998         /* For a shared interface this is done in the kernel; skip. */
999         if (!fif->is_shared_mac)
1000                 dpaa_fman_if_pool_setup(dev);
1001
1002         if (fif->num_profiles) {
1003                 int8_t vsp_id = rxq->vsp_id;
1004
1005                 if (vsp_id >= 0) {
1006                         ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
1007                                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
1008                                         fif);
1009                         if (ret) {
1010                                 DPAA_PMD_ERR("dpaa_port_vsp_update failed");
1011                                 return ret;
1012                         }
1013                 } else {
1014                         DPAA_PMD_INFO("Base profile is associated to"
1015                                 " RXQ fqid:%d\r\n", rxq->fqid);
1016                         if (fif->is_shared_mac) {
1017                                 DPAA_PMD_ERR("Fatal: Base profile is associated"
1018                                              " to shared interface on DPDK.");
1019                                 return -EINVAL;
1020                         }
1021                         dpaa_intf->vsp_bpid[fif->base_profile_id] =
1022                                 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1023                 }
1024         } else {
1025                 dpaa_intf->vsp_bpid[0] =
1026                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1027         }
1028
1029         dpaa_intf->valid = 1;
1030         DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
1031                 fman_if_get_sg_enable(fif), max_rx_pktlen);
1032         /* Check whether the queue can still be put in push mode; no error check for now */
1033         if (!rxq->is_static &&
1034             dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
1035                 struct qman_portal *qp;
1036                 int q_fd;
1037
1038                 dpaa_push_queue_idx++;
1039                 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
1040                 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
1041                                    QM_FQCTRL_CTXASTASHING |
1042                                    QM_FQCTRL_PREFERINCACHE;
1043                 opts.fqd.context_a.stashing.exclusive = 0;
1044                 /* In multicore scenario stashing becomes a bottleneck on LS1046.
1045                  * So do not enable stashing in this case
1046                  */
1047                 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
1048                         opts.fqd.context_a.stashing.annotation_cl =
1049                                                 DPAA_IF_RX_ANNOTATION_STASH;
1050                 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
1051                 opts.fqd.context_a.stashing.context_cl =
1052                                                 DPAA_IF_RX_CONTEXT_STASH;
1053
1054                 /* Create a pool channel and associate the given queue with it */
1055                 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
1056                 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1057                 opts.fqd.dest.channel = rxq->ch_id;
1058                 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
1059                 flags = QMAN_INITFQ_FLAG_SCHED;
1060
1061                 /* Configure tail drop */
1062                 if (dpaa_intf->cgr_rx) {
1063                         opts.we_mask |= QM_INITFQ_WE_CGID;
1064                         opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
1065                         opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1066                 }
1067                 ret = qman_init_fq(rxq, flags, &opts);
1068                 if (ret) {
1069                         DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
1070                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1071                         return ret;
1072                 }
1073                 if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
1074                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
1075                 } else {
1076                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
1077                         rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
1078                 }
1079
1080                 rxq->is_static = true;
1081
1082                 /* Allocate qman specific portals */
1083                 qp = fsl_qman_fq_portal_create(&q_fd);
1084                 if (!qp) {
1085                         DPAA_PMD_ERR("Unable to alloc fq portal");
1086                         return -1;
1087                 }
1088                 rxq->qp = qp;
1089
1090                 /* Set up the device interrupt handler */
1091                 if (dev->intr_handle == NULL) {
1092                         struct rte_dpaa_device *dpaa_dev;
1093                         struct rte_device *rdev = dev->device;
1094
1095                         dpaa_dev = container_of(rdev, struct rte_dpaa_device,
1096                                                 device);
1097                         dev->intr_handle = dpaa_dev->intr_handle;
1098                         if (rte_intr_vec_list_alloc(dev->intr_handle,
1099                                         NULL, dpaa_push_mode_max_queue)) {
1100                                 DPAA_PMD_ERR("intr_vec alloc failed");
1101                                 return -ENOMEM;
1102                         }
1103                         if (rte_intr_nb_efd_set(dev->intr_handle,
1104                                         dpaa_push_mode_max_queue))
1105                                 return -rte_errno;
1106
1107                         if (rte_intr_max_intr_set(dev->intr_handle,
1108                                         dpaa_push_mode_max_queue))
1109                                 return -rte_errno;
1110                 }
1111
1112                 if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
1113                         return -rte_errno;
1114
1115                 if (rte_intr_vec_list_index_set(dev->intr_handle,
1116                                                 queue_idx, queue_idx + 1))
1117                         return -rte_errno;
1118
1119                 if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
1120                                                    q_fd))
1121                         return -rte_errno;
1122
1123                 rxq->q_fd = q_fd;
1124         }
1125         rxq->bp_array = rte_dpaa_bpid_info;
1126         dev->data->rx_queues[queue_idx] = rxq;
1127
1128         /* configure the CGR size as per the desc size */
1129         if (dpaa_intf->cgr_rx) {
1130                 struct qm_mcc_initcgr cgr_opts = {0};
1131
1132                 rxq->nb_desc = nb_desc;
1133                 /* Enable tail drop with cgr on this queue */
1134                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
1135                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
1136                 if (ret) {
1137                         DPAA_PMD_WARN(
1138                                 "rx taildrop modify fail on fqid %d (ret=%d)",
1139                                 rxq->fqid, ret);
1140                 }
1141         }
1142         /* Enable main queue to receive error packets also by default */
1143         fman_if_set_err_fqid(fif, rxq->fqid);
1144         return 0;
1145 }
1146
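/* Attach an Rx queue to an eventdev channel: re-initialize the FQ to target
 * the given channel and priority. Atomic scheduling uses HOLDACTIVE (and
 * clears AVOIDBLOCK); ordered scheduling is not supported.
 */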
1147 int
1148 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
1149                 int eth_rx_queue_id,
1150                 u16 ch_id,
1151                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1152 {
1153         int ret;
1154         u32 flags = 0;
1155         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1156         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1157         struct qm_mcc_initfq opts = {0};
1158
1159         if (dpaa_push_mode_max_queue)
1160                 DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
1161                               "PUSH mode already enabled for first %d queues.\n"
1162                               "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
1163                               dpaa_push_mode_max_queue);
1164
1165         dpaa_poll_queue_default_config(&opts);
1166
1167         switch (queue_conf->ev.sched_type) {
1168         case RTE_SCHED_TYPE_ATOMIC:
1169                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
1170                 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
1171                  * configuration with HOLD_ACTIVE setting
1172                  */
1173                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
1174                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
1175                 break;
1176         case RTE_SCHED_TYPE_ORDERED:
1177                 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
1178                 return -1;
1179         default:
1180                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
1181                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
1182                 break;
1183         }
1184
1185         opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1186         opts.fqd.dest.channel = ch_id;
1187         opts.fqd.dest.wq = queue_conf->ev.priority;
1188
1189         if (dpaa_intf->cgr_rx) {
1190                 opts.we_mask |= QM_INITFQ_WE_CGID;
1191                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1192                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1193         }
1194
1195         flags = QMAN_INITFQ_FLAG_SCHED;
1196
1197         ret = qman_init_fq(rxq, flags, &opts);
1198         if (ret) {
1199                 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
1200                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1201                 return ret;
1202         }
1203
1204         /* copy configuration which needs to be filled during dequeue */
1205         memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
1206         dev->data->rx_queues[eth_rx_queue_id] = rxq;
1207
1208         return ret;
1209 }
1210
1211 int
1212 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
1213                 int eth_rx_queue_id)
1214 {
1215         struct qm_mcc_initfq opts;
1216         int ret;
1217         u32 flags = 0;
1218         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1219         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1220
1221         dpaa_poll_queue_default_config(&opts);
1222
1223         if (dpaa_intf->cgr_rx) {
1224                 opts.we_mask |= QM_INITFQ_WE_CGID;
1225                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1226                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1227         }
1228
1229         ret = qman_init_fq(rxq, flags, &opts);
1230         if (ret) {
1231                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
1232                              rxq->fqid, ret);
1233         }
1234
1235         rxq->cb.dqrr_dpdk_cb = NULL;
1236         dev->data->rx_queues[eth_rx_queue_id] = NULL;
1237
1238         return 0;
1239 }
1240
1241 static
1242 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1243                             uint16_t nb_desc __rte_unused,
1244                 unsigned int socket_id __rte_unused,
1245                 const struct rte_eth_txconf *tx_conf)
1246 {
1247         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1248         struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
1249
1250         PMD_INIT_FUNC_TRACE();
1251
1252         /* Tx deferred start is not supported */
1253         if (tx_conf->tx_deferred_start) {
1254                 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1255                 return -EINVAL;
1256         }
1257         txq->nb_desc = UINT16_MAX;
1258         txq->offloads = tx_conf->offloads;
1259
1260         if (queue_idx >= dev->data->nb_tx_queues) {
1261                 rte_errno = EOVERFLOW;
1262                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
1263                       (void *)dev, queue_idx, dev->data->nb_tx_queues);
1264                 return -rte_errno;
1265         }
1266
1267         DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
1268                         queue_idx, txq->fqid);
1269         dev->data->tx_queues[queue_idx] = txq;
1270
1271         return 0;
1272 }
1273
1274 static uint32_t
1275 dpaa_dev_rx_queue_count(void *rx_queue)
1276 {
1277         struct qman_fq *rxq = rx_queue;
1278         u32 frm_cnt = 0;
1279
1280         PMD_INIT_FUNC_TRACE();
1281
1282         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
1283                 DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
1284                                rx_queue, frm_cnt);
1285         }
1286         return frm_cnt;
1287 }
1288
1289 static int dpaa_link_down(struct rte_eth_dev *dev)
1290 {
1291         struct fman_if *fif = dev->process_private;
1292         struct __fman_if *__fif;
1293
1294         PMD_INIT_FUNC_TRACE();
1295
1296         __fif = container_of(fif, struct __fman_if, __if);
1297
1298         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1299                 dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
1300         else
1301                 return dpaa_eth_dev_stop(dev);
1302         return 0;
1303 }
1304
1305 static int dpaa_link_up(struct rte_eth_dev *dev)
1306 {
1307         struct fman_if *fif = dev->process_private;
1308         struct __fman_if *__fif;
1309
1310         PMD_INIT_FUNC_TRACE();
1311
1312         __fif = container_of(fif, struct __fman_if, __if);
1313
1314         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1315                 dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
1316         else
1317                 dpaa_eth_dev_start(dev);
1318         return 0;
1319 }
1320
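/* Flow control set: validate the high/low watermarks, program the pause
 * thresholds and quanta for Tx or full flow control, and cache the requested
 * configuration for later dpaa_flow_ctrl_get() calls.
 */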
1321 static int
1322 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
1323                    struct rte_eth_fc_conf *fc_conf)
1324 {
1325         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1326         struct rte_eth_fc_conf *net_fc;
1327
1328         PMD_INIT_FUNC_TRACE();
1329
1330         if (!(dpaa_intf->fc_conf)) {
1331                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1332                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1333                 if (!dpaa_intf->fc_conf) {
1334                         DPAA_PMD_ERR("unable to save flow control info");
1335                         return -ENOMEM;
1336                 }
1337         }
1338         net_fc = dpaa_intf->fc_conf;
1339
1340         if (fc_conf->high_water < fc_conf->low_water) {
1341                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
1342                 return -EINVAL;
1343         }
1344
1345         if (fc_conf->mode == RTE_ETH_FC_NONE) {
1346                 return 0;
1347         } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
1348                  fc_conf->mode == RTE_ETH_FC_FULL) {
1349                 fman_if_set_fc_threshold(dev->process_private,
1350                                          fc_conf->high_water,
1351                                          fc_conf->low_water,
1352                                          dpaa_intf->bp_info->bpid);
1353                 if (fc_conf->pause_time)
1354                         fman_if_set_fc_quanta(dev->process_private,
1355                                               fc_conf->pause_time);
1356         }
1357
1358         /* Save the information in dpaa device */
1359         net_fc->pause_time = fc_conf->pause_time;
1360         net_fc->high_water = fc_conf->high_water;
1361         net_fc->low_water = fc_conf->low_water;
1362         net_fc->send_xon = fc_conf->send_xon;
1363         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
1364         net_fc->mode = fc_conf->mode;
1365         net_fc->autoneg = fc_conf->autoneg;
1366
1367         return 0;
1368 }
1369
1370 static int
1371 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
1372                    struct rte_eth_fc_conf *fc_conf)
1373 {
1374         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1375         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
1376         int ret;
1377
1378         PMD_INIT_FUNC_TRACE();
1379
1380         if (net_fc) {
1381                 fc_conf->pause_time = net_fc->pause_time;
1382                 fc_conf->high_water = net_fc->high_water;
1383                 fc_conf->low_water = net_fc->low_water;
1384                 fc_conf->send_xon = net_fc->send_xon;
1385                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
1386                 fc_conf->mode = net_fc->mode;
1387                 fc_conf->autoneg = net_fc->autoneg;
1388                 return 0;
1389         }
1390         ret = fman_if_get_fc_threshold(dev->process_private);
1391         if (ret) {
1392                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
1393                 fc_conf->pause_time =
1394                         fman_if_get_fc_quanta(dev->process_private);
1395         } else {
1396                 fc_conf->mode = RTE_ETH_FC_NONE;
1397         }
1398
1399         return 0;
1400 }
1401
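/* Add a secondary MAC address to the FMan MAC filter at the given
 * index. A failure is logged but not propagated to the caller.
 */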
1402 static int
1403 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
1404                              struct rte_ether_addr *addr,
1405                              uint32_t index,
1406                              __rte_unused uint32_t pool)
1407 {
1408         int ret;
1409
1410         PMD_INIT_FUNC_TRACE();
1411
1412         ret = fman_if_add_mac_addr(dev->process_private,
1413                                    addr->addr_bytes, index);
1414
1415         if (ret)
1416                 DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
1417         return 0;
1418 }
1419
1420 static void
1421 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
1422                           uint32_t index)
1423 {
1424         PMD_INIT_FUNC_TRACE();
1425
1426         fman_if_clear_mac_addr(dev->process_private, index);
1427 }
1428
1429 static int
1430 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
1431                        struct rte_ether_addr *addr)
1432 {
1433         int ret;
1434
1435         PMD_INIT_FUNC_TRACE();
1436
1437         ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
1438         if (ret)
1439                 DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1440
1441         return ret;
1442 }
1443
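/* Update the RSS hash fields. Supported only in FMCLESS mode (neither
 * the default queue nor an FMC-generated configuration is in use), in
 * which case the FMan port PCD is reprogrammed via dpaa_fm_config().
 */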
1444 static int
1445 dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1446                          struct rte_eth_rss_conf *rss_conf)
1447 {
1448         struct rte_eth_dev_data *data = dev->data;
1449         struct rte_eth_conf *eth_conf = &data->dev_conf;
1450
1451         PMD_INIT_FUNC_TRACE();
1452
1453         if (!(default_q || fmc_q)) {
1454                 if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
1455                         DPAA_PMD_ERR("FM port configuration failed\n");
1456                         return -1;
1457                 }
1458                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1459         } else {
1460                 DPAA_PMD_ERR("Function not supported\n");
1461                 return -ENOTSUP;
1462         }
1463         return 0;
1464 }
1465
1466 static int
1467 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1468                            struct rte_eth_rss_conf *rss_conf)
1469 {
1470         struct rte_eth_dev_data *data = dev->data;
1471         struct rte_eth_conf *eth_conf = &data->dev_conf;
1472
1473         /* dpaa does not support rss_key, so length should be 0 */
1474         rss_conf->rss_key_len = 0;
1475         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1476         return 0;
1477 }
1478
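/* Rx queue interrupt control. Only queues in push (static dequeue)
 * mode own a dedicated QMan portal, so interrupts can be toggled only
 * for those: enable adds the DQRI interrupt source, disable removes
 * all sources and drains the portal event fd.
 */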
1479 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
1480                                       uint16_t queue_id)
1481 {
1482         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1483         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1484
1485         if (!rxq->is_static)
1486                 return -EINVAL;
1487
1488         return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
1489 }
1490
1491 static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
1492                                        uint16_t queue_id)
1493 {
1494         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1495         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1496         uint32_t temp;
1497         ssize_t temp1;
1498
1499         if (!rxq->is_static)
1500                 return -EINVAL;
1501
1502         qman_fq_portal_irqsource_remove(rxq->qp, ~0);
1503
1504         temp1 = read(rxq->q_fd, &temp, sizeof(temp));
1505         if (temp1 != sizeof(temp))
1506                 DPAA_PMD_ERR("irq read error");
1507
1508         qman_fq_portal_thread_irq(rxq->qp);
1509
1510         return 0;
1511 }
1512
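/* Fill in Rx queue details (mempool, descriptor count, offloads and
 * the FMan maximum frame size reported as the Rx buffer size) for
 * rte_eth_rx_queue_info_get().
 */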
1513 static void
1514 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1515         struct rte_eth_rxq_info *qinfo)
1516 {
1517         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1518         struct qman_fq *rxq;
1519         int ret;
1520
1521         rxq = dev->data->rx_queues[queue_id];
1522
1523         qinfo->mp = dpaa_intf->bp_info->mp;
1524         qinfo->scattered_rx = dev->data->scattered_rx;
1525         qinfo->nb_desc = rxq->nb_desc;
1526
1527         /* Report the HW Rx buffer length to user */
1528         ret = fman_if_get_maxfrm(dev->process_private);
1529         if (ret > 0)
1530                 qinfo->rx_buf_size = ret;
1531
1532         qinfo->conf.rx_free_thresh = 1;
1533         qinfo->conf.rx_drop_en = 1;
1534         qinfo->conf.rx_deferred_start = 0;
1535         qinfo->conf.offloads = rxq->offloads;
1536 }
1537
1538 static void
1539 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1540         struct rte_eth_txq_info *qinfo)
1541 {
1542         struct qman_fq *txq;
1543
1544         txq = dev->data->tx_queues[queue_id];
1545
1546         qinfo->nb_desc = txq->nb_desc;
1547         qinfo->conf.tx_thresh.pthresh = 0;
1548         qinfo->conf.tx_thresh.hthresh = 0;
1549         qinfo->conf.tx_thresh.wthresh = 0;
1550
1551         qinfo->conf.tx_free_thresh = 0;
1552         qinfo->conf.tx_rs_thresh = 0;
1553         qinfo->conf.offloads = txq->offloads;
1554         qinfo->conf.tx_deferred_start = 0;
1555 }
1556
1557 static struct eth_dev_ops dpaa_devops = {
1558         .dev_configure            = dpaa_eth_dev_configure,
1559         .dev_start                = dpaa_eth_dev_start,
1560         .dev_stop                 = dpaa_eth_dev_stop,
1561         .dev_close                = dpaa_eth_dev_close,
1562         .dev_infos_get            = dpaa_eth_dev_info,
1563         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
1564
1565         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
1566         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
1567         .rx_burst_mode_get        = dpaa_dev_rx_burst_mode_get,
1568         .tx_burst_mode_get        = dpaa_dev_tx_burst_mode_get,
1569         .rxq_info_get             = dpaa_rxq_info_get,
1570         .txq_info_get             = dpaa_txq_info_get,
1571
1572         .flow_ctrl_get            = dpaa_flow_ctrl_get,
1573         .flow_ctrl_set            = dpaa_flow_ctrl_set,
1574
1575         .link_update              = dpaa_eth_link_update,
1576         .stats_get                = dpaa_eth_stats_get,
1577         .xstats_get               = dpaa_dev_xstats_get,
1578         .xstats_get_by_id         = dpaa_xstats_get_by_id,
1579         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
1580         .xstats_get_names         = dpaa_xstats_get_names,
1581         .xstats_reset             = dpaa_eth_stats_reset,
1582         .stats_reset              = dpaa_eth_stats_reset,
1583         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
1584         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
1585         .allmulticast_enable      = dpaa_eth_multicast_enable,
1586         .allmulticast_disable     = dpaa_eth_multicast_disable,
1587         .mtu_set                  = dpaa_mtu_set,
1588         .dev_set_link_down        = dpaa_link_down,
1589         .dev_set_link_up          = dpaa_link_up,
1590         .mac_addr_add             = dpaa_dev_add_mac_addr,
1591         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
1592         .mac_addr_set             = dpaa_dev_set_mac_addr,
1593
1594         .fw_version_get           = dpaa_fw_version_get,
1595
1596         .rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
1597         .rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
1598         .rss_hash_update          = dpaa_dev_rss_hash_update,
1599         .rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
1600 };
1601
1602 static bool
1603 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1604 {
1605         if (strcmp(dev->device->driver->name,
1606                    drv->driver.name))
1607                 return false;
1608
1609         return true;
1610 }
1611
1612 static bool
1613 is_dpaa_supported(struct rte_eth_dev *dev)
1614 {
1615         return is_device_supported(dev, &rte_dpaa_pmd);
1616 }
1617
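/* PMD-specific API exported via rte_pmd_dpaa.h: enable or disable MAC
 * loopback on the FMan interface backing the given port.
 */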
1618 int
1619 rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
1620 {
1621         struct rte_eth_dev *dev;
1622
1623         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1624
1625         dev = &rte_eth_devices[port];
1626
1627         if (!is_dpaa_supported(dev))
1628                 return -ENOTSUP;
1629
1630         if (on)
1631                 fman_if_loopback_enable(dev->process_private);
1632         else
1633                 fman_if_loopback_disable(dev->process_private);
1634
1635         return 0;
1636 }
1637
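/* Seed the cached flow control configuration from the current FMan
 * state: if a pause threshold is already programmed, record TX_PAUSE
 * mode and the configured quanta, otherwise record FC_NONE.
 */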
1638 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
1639                                struct fman_if *fman_intf)
1640 {
1641         struct rte_eth_fc_conf *fc_conf;
1642         int ret;
1643
1644         PMD_INIT_FUNC_TRACE();
1645
1646         if (!(dpaa_intf->fc_conf)) {
1647                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1648                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1649                 if (!dpaa_intf->fc_conf) {
1650                         DPAA_PMD_ERR("unable to save flow control info");
1651                         return -ENOMEM;
1652                 }
1653         }
1654         fc_conf = dpaa_intf->fc_conf;
1655         ret = fman_if_get_fc_threshold(fman_intf);
1656         if (ret) {
1657                 fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
1658                 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
1659         } else {
1660                 fc_conf->mode = RTE_ETH_FC_NONE;
1661         }
1662
1663         return 0;
1664 }
1665
1666 /* Initialise an Rx FQ */
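/* The FQ is created with QMAN_FQ_FLAG_NO_ENQUEUE and initialised with
 * the default poll-mode dequeue configuration. When a CGR is supplied,
 * frame-count based tail drop (td_threshold) is enabled on the queue
 * through the congestion group before qman_init_fq().
 */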
1667 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1668                               uint32_t fqid)
1669 {
1670         struct qm_mcc_initfq opts = {0};
1671         int ret;
1672         u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
1673         struct qm_mcc_initcgr cgr_opts = {
1674                 .we_mask = QM_CGR_WE_CS_THRES |
1675                                 QM_CGR_WE_CSTD_EN |
1676                                 QM_CGR_WE_MODE,
1677                 .cgr = {
1678                         .cstd_en = QM_CGR_EN,
1679                         .mode = QMAN_CGR_MODE_FRAME
1680                 }
1681         };
1682
1683         if (fmc_q || default_q) {
1684                 ret = qman_reserve_fqid(fqid);
1685                 if (ret) {
1686                         DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
1687                                      fqid, ret);
1688                         return -EINVAL;
1689                 }
1690         }
1691
1692         DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1693         ret = qman_create_fq(fqid, flags, fq);
1694         if (ret) {
1695                 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
1696                         fqid, ret);
1697                 return ret;
1698         }
1699         fq->is_static = false;
1700
1701         dpaa_poll_queue_default_config(&opts);
1702
1703         if (cgr_rx) {
1704                 /* Enable tail drop with cgr on this queue */
1705                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1706                 cgr_rx->cb = NULL;
1707                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1708                                       &cgr_opts);
1709                 if (ret) {
1710                         DPAA_PMD_WARN(
1711                                 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1712                                 fq->fqid, ret);
1713                         goto without_cgr;
1714                 }
1715                 opts.we_mask |= QM_INITFQ_WE_CGID;
1716                 opts.fqd.cgid = cgr_rx->cgrid;
1717                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1718         }
1719 without_cgr:
1720         ret = qman_init_fq(fq, 0, &opts);
1721         if (ret)
1722                 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
1723         return ret;
1724 }
1725
1726 /* Initialise a Tx FQ */
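/* A dynamic FQID is allocated and the FQ is directed to the FMan Tx
 * channel (DC portal) at DPAA_IF_TX_PRIORITY. context_a is programmed
 * for no Tx confirmation, so hardware releases transmitted buffers on
 * its own. When a CGR is supplied, Tx tail drop with a td_tx_threshold
 * frame limit is enabled on the queue.
 */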
1727 static int dpaa_tx_queue_init(struct qman_fq *fq,
1728                               struct fman_if *fman_intf,
1729                               struct qman_cgr *cgr_tx)
1730 {
1731         struct qm_mcc_initfq opts = {0};
1732         struct qm_mcc_initcgr cgr_opts = {
1733                 .we_mask = QM_CGR_WE_CS_THRES |
1734                                 QM_CGR_WE_CSTD_EN |
1735                                 QM_CGR_WE_MODE,
1736                 .cgr = {
1737                         .cstd_en = QM_CGR_EN,
1738                         .mode = QMAN_CGR_MODE_FRAME
1739                 }
1740         };
1741         int ret;
1742
1743         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1744                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1745         if (ret) {
1746                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1747                 return ret;
1748         }
1749         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1750                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1751         opts.fqd.dest.channel = fman_intf->tx_channel_id;
1752         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1753         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1754         opts.fqd.context_b = 0;
1755         /* no tx-confirmation */
1756         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1757         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1758         DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1759
1760         if (cgr_tx) {
1761                 /* Enable tail drop with cgr on this queue */
1762                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
1763                                       td_tx_threshold, 0);
1764                 cgr_tx->cb = NULL;
1765                 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
1766                                       &cgr_opts);
1767                 if (ret) {
1768                         DPAA_PMD_WARN(
1769                                 "tx taildrop init fail on tx fqid 0x%x(ret=%d)",
1770                                 fq->fqid, ret);
1771                         goto without_cgr;
1772                 }
1773                 opts.we_mask |= QM_INITFQ_WE_CGID;
1774                 opts.fqd.cgid = cgr_tx->cgrid;
1775                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1776                 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
1777                                 td_tx_threshold);
1778         }
1779 without_cgr:
1780         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1781         if (ret)
1782                 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
1783         return ret;
1784 }
1785
1786 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1787 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1788 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1789 {
1790         struct qm_mcc_initfq opts = {0};
1791         int ret;
1792
1793         PMD_INIT_FUNC_TRACE();
1794
1795         ret = qman_reserve_fqid(fqid);
1796         if (ret) {
1797                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1798                         fqid, ret);
1799                 return -EINVAL;
1800         }
1801         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
1802         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1803         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1804         if (ret) {
1805                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1806                         fqid, ret);
1807                 return ret;
1808         }
1809         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1810         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1811         ret = qman_init_fq(fq, 0, &opts);
1812         if (ret)
1813                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1814                             fqid, ret);
1815         return ret;
1816 }
1817 #endif
1818
1819 /* Initialise a network interface (secondary process) */
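/* The primary process has already created the queues; only the
 * per-process FMan handle and the Rx/Tx burst function pointers are
 * set up here.
 */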
1820 static int
1821 dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
1822 {
1823         struct rte_dpaa_device *dpaa_device;
1824         struct fm_eth_port_cfg *cfg;
1825         struct dpaa_if *dpaa_intf;
1826         struct fman_if *fman_intf;
1827         int dev_id;
1828
1829         PMD_INIT_FUNC_TRACE();
1830
1831         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1832         dev_id = dpaa_device->id.dev_id;
1833         cfg = dpaa_get_eth_port_cfg(dev_id);
1834         fman_intf = cfg->fman_if;
1835         eth_dev->process_private = fman_intf;
1836
1837         /* Plugging of UCODE burst API not supported in Secondary */
1838         dpaa_intf = eth_dev->data->dev_private;
1839         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1840         if (dpaa_intf->cgr_tx)
1841                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
1842         else
1843                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
1844 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1845         qman_set_fq_lookup_table(
1846                 dpaa_intf->rx_queues->qman_fq_lookup_table);
1847 #endif
1848
1849         return 0;
1850 }
1851
1852 /* Initialise a network interface */
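/* Primary process initialisation: determine the number of Rx FQs
 * (default queue, FMC-provided PCD queues, or one per lcore in FMCLESS
 * mode), create the Rx/Tx frame queues with optional CGR based tail
 * drop, seed the default flow control state and, for non-shared MACs,
 * program sane hardware defaults.
 */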
1853 static int
1854 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1855 {
1856         int num_rx_fqs, fqid;
1857         int loop, ret = 0;
1858         int dev_id;
1859         struct rte_dpaa_device *dpaa_device;
1860         struct dpaa_if *dpaa_intf;
1861         struct fm_eth_port_cfg *cfg;
1862         struct fman_if *fman_intf;
1863         struct fman_if_bpool *bp, *tmp_bp;
1864         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1865         uint32_t cgrid_tx[MAX_DPAA_CORES];
1866         uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
1867         int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
1868         int8_t vsp_id = -1;
1869
1870         PMD_INIT_FUNC_TRACE();
1871
1872         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1873         dev_id = dpaa_device->id.dev_id;
1874         dpaa_intf = eth_dev->data->dev_private;
1875         cfg = dpaa_get_eth_port_cfg(dev_id);
1876         fman_intf = cfg->fman_if;
1877
1878         dpaa_intf->name = dpaa_device->name;
1879
1880         /* save fman_if & cfg in the interface structure */
1881         eth_dev->process_private = fman_intf;
1882         dpaa_intf->ifid = dev_id;
1883         dpaa_intf->cfg = cfg;
1884
1885         memset((char *)dev_rx_fqids, 0,
1886                 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
1887
1888         memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
1889
1890         /* Initialize Rx FQ's */
1891         if (default_q) {
1892                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1893         } else if (fmc_q) {
1894                 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
1895                                                 dev_vspids,
1896                                                 DPAA_MAX_NUM_PCD_QUEUES);
1897                 if (num_rx_fqs < 0) {
1898                         DPAA_PMD_ERR("%s FMC initialization failed!",
1899                                 dpaa_intf->name);
                        ret = num_rx_fqs;
1900                         goto free_rx;
1901                 }
1902                 if (!num_rx_fqs) {
1903                         DPAA_PMD_WARN("%s is not configured by FMC.",
1904                                 dpaa_intf->name);
1905                 }
1906         } else {
1907                 /* FMCLESS mode, load balance to multiple cores. */
1908                 num_rx_fqs = rte_lcore_count();
1909         }
1910
1911         /* Each device cannot have more than DPAA_MAX_NUM_PCD_QUEUES RX
1912          * queues.
1913          */
1914         if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1915                 DPAA_PMD_ERR("Invalid number of RX queues\n");
1916                 return -EINVAL;
1917         }
1918
1919         if (num_rx_fqs > 0) {
1920                 dpaa_intf->rx_queues = rte_zmalloc(NULL,
1921                         sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1922                 if (!dpaa_intf->rx_queues) {
1923                         DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1924                         return -ENOMEM;
1925                 }
1926         } else {
1927                 dpaa_intf->rx_queues = NULL;
1928         }
1929
1930         memset(cgrid, 0, sizeof(cgrid));
1931         memset(cgrid_tx, 0, sizeof(cgrid_tx));
1932
1933         /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
1934          * Tx tail drop is disabled.
1935          */
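        /* Example (illustrative): launching the application with
         * DPAA_TX_TAILDROP_THRESHOLD=512 in the environment enables Tx
         * tail drop with a 512-frame CGR threshold on each Tx FQ.
         */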
1936         if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
1937                 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
1938                 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
1939                                td_tx_threshold);
1940                 /* if a very large value is being configured */
1941                 if (td_tx_threshold > UINT16_MAX)
1942                         td_tx_threshold = CGR_RX_PERFQ_THRESH;
1943         }
1944
1945         /* If congestion control is enabled globally */
1946         if (num_rx_fqs > 0 && td_threshold) {
1947                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1948                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1949                 if (!dpaa_intf->cgr_rx) {
1950                         DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1951                         ret = -ENOMEM;
1952                         goto free_rx;
1953                 }
1954
1955                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1956                 if (ret != num_rx_fqs) {
1957                         DPAA_PMD_WARN("insufficient CGRIDs available");
1958                         ret = -EINVAL;
1959                         goto free_rx;
1960                 }
1961         } else {
1962                 dpaa_intf->cgr_rx = NULL;
1963         }
1964
1965         if (!fmc_q && !default_q) {
1966                 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
1967                                             num_rx_fqs, 0);
1968                 if (ret < 0) {
1969                         DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
1970                         goto free_rx;
1971                 }
1972         }
1973
1974         for (loop = 0; loop < num_rx_fqs; loop++) {
1975                 if (default_q)
1976                         fqid = cfg->rx_def;
1977                 else
1978                         fqid = dev_rx_fqids[loop];
1979
1980                 vsp_id = dev_vspids[loop];
1981
1982                 if (dpaa_intf->cgr_rx)
1983                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1984
1985                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1986                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1987                         fqid);
1988                 if (ret)
1989                         goto free_rx;
1990                 dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
1991                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
1992         }
1993         dpaa_intf->nb_rx_queues = num_rx_fqs;
1994
1995         /* Initialise Tx FQs. Have as many Tx FQs as number of cores */
1996         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
1997                 MAX_DPAA_CORES, MAX_CACHELINE);
1998         if (!dpaa_intf->tx_queues) {
1999                 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
2000                 ret = -ENOMEM;
2001                 goto free_rx;
2002         }
2003
2004         /* If congestion control is enabled globally */
2005         if (td_tx_threshold) {
2006                 dpaa_intf->cgr_tx = rte_zmalloc(NULL,
2007                         sizeof(struct qman_cgr) * MAX_DPAA_CORES,
2008                         MAX_CACHELINE);
2009                 if (!dpaa_intf->cgr_tx) {
2010                         DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
2011                         ret = -ENOMEM;
2012                         goto free_rx;
2013                 }
2014
2015                 ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
2016                                              1, 0);
2017                 if (ret != MAX_DPAA_CORES) {
2018                         DPAA_PMD_WARN("insufficient CGRIDs available");
2019                         ret = -EINVAL;
2020                         goto free_rx;
2021                 }
2022         } else {
2023                 dpaa_intf->cgr_tx = NULL;
2024         }
2025
2026
2027         for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
2028                 if (dpaa_intf->cgr_tx)
2029                         dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
2030
2031                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
2032                         fman_intf,
2033                         dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
2034                 if (ret)
2035                         goto free_tx;
2036                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
2037         }
2038         dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
2039
2040 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2041         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2042                         [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
2043         if (ret) {
2044                 DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
2045                 goto free_tx;
2046         }
2047         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
2048         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2049                         [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
2050         if (ret) {
2051                 DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
2052                 goto free_tx;
2053         }
2054         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
2055 #endif
2056
2057         DPAA_PMD_DEBUG("All frame queues created");
2058
2059         /* Get the initial configuration for flow control */
2060         dpaa_fc_set_default(dpaa_intf, fman_intf);
2061
2062         /* reset bpool list, initialize bpool dynamically */
2063         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
2064                 list_del(&bp->node);
2065                 rte_free(bp);
2066         }
2067
2068         /* Populate ethdev structure */
2069         eth_dev->dev_ops = &dpaa_devops;
2070         eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
2071         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
2072         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
2073
2074         /* Allocate memory for storing MAC addresses */
2075         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2076                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
2077         if (eth_dev->data->mac_addrs == NULL) {
2078                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
2079                                                 "store MAC addresses",
2080                                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
2081                 ret = -ENOMEM;
2082                 goto free_tx;
2083         }
2084
2085         /* copy the primary mac address */
2086         rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
2087
2088         RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
2089                 dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
2090
2091         if (!fman_intf->is_shared_mac) {
2092                 /* Configure error packet handling */
2093                 fman_if_receive_rx_errors(fman_intf,
2094                         FM_FD_RX_STATUS_ERR_MASK);
2095                 /* Disable RX mode */
2096                 fman_if_disable_rx(fman_intf);
2097                 /* Disable promiscuous mode */
2098                 fman_if_promiscuous_disable(fman_intf);
2099                 /* Disable multicast */
2100                 fman_if_reset_mcast_filter_table(fman_intf);
2101                 /* Reset interface statistics */
2102                 fman_if_stats_reset(fman_intf);
2103                 /* Disable SG by default */
2104                 fman_if_set_sg(fman_intf, 0);
2105                 fman_if_set_maxfrm(fman_intf,
2106                                    RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2107         }
2108
2109         return 0;
2110
2111 free_tx:
2112         rte_free(dpaa_intf->tx_queues);
2113         dpaa_intf->tx_queues = NULL;
2114         dpaa_intf->nb_tx_queues = 0;
2115
2116 free_rx:
2117         rte_free(dpaa_intf->cgr_rx);
2118         rte_free(dpaa_intf->cgr_tx);
2119         rte_free(dpaa_intf->rx_queues);
2120         dpaa_intf->rx_queues = NULL;
2121         dpaa_intf->nb_rx_queues = 0;
2122         return ret;
2123 }
2124
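/* Bus probe callback. Secondary processes simply attach to the port
 * created by the primary. The primary performs one-time global setup
 * (FMC detection, optional FM library init, push-mode queue limits),
 * ensures a QMan/BMan portal is affined to the calling thread, then
 * allocates the ethdev and runs dpaa_dev_init().
 */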
2125 static int
2126 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
2127                struct rte_dpaa_device *dpaa_dev)
2128 {
2129         int diag;
2130         int ret;
2131         struct rte_eth_dev *eth_dev;
2132
2133         PMD_INIT_FUNC_TRACE();
2134
2135         if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
2136                 RTE_PKTMBUF_HEADROOM) {
2137                 DPAA_PMD_ERR(
2138                 "RTE_PKTMBUF_HEADROOM(%d) shall be >= DPAA Annotation req(%d)",
2139                 RTE_PKTMBUF_HEADROOM,
2140                 DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
2141
2142                 return -1;
2143         }
2144
2145         /* In case of secondary process, the device is already configured
2146          * and no further action is required, except portal initialization
2147          * and verifying secondary attachment to port name.
2148          */
2149         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2150                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
2151                 if (!eth_dev)
2152                         return -ENOMEM;
2153                 eth_dev->device = &dpaa_dev->device;
2154                 eth_dev->dev_ops = &dpaa_devops;
2155
2156                 ret = dpaa_dev_init_secondary(eth_dev);
2157                 if (ret != 0) {
2158                         RTE_LOG(ERR, PMD, "secondary dev init failed\n");
2159                         return ret;
2160                 }
2161
2162                 rte_eth_dev_probing_finish(eth_dev);
2163                 return 0;
2164         }
2165
2166         if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
2167                 if (access("/tmp/fmc.bin", F_OK) == -1) {
2168                         DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
2169                         default_q = 1;
2170                 }
2171
2172                 if (!(default_q || fmc_q)) {
2173                         if (dpaa_fm_init()) {
2174                                 DPAA_PMD_ERR("FM init failed\n");
2175                                 return -1;
2176                         }
2177                 }
2178
2179                 /* disabling the default push mode for LS1043 */
2180                 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
2181                         dpaa_push_mode_max_queue = 0;
2182
2183                 /* Check whether push mode queues are to be enabled.
2184                  * Currently we allow only one queue per thread.
2185                  */
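                /* Example (illustrative): setting DPAA_PUSH_QUEUES_NUMBER=8
                 * raises the push-mode queue budget; values above
                 * DPAA_MAX_PUSH_MODE_QUEUE are clamped to that maximum.
                 */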
2186                 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
2187                         dpaa_push_mode_max_queue =
2188                                         atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
2189                         if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
2190                             dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
2191                 }
2192
2193                 is_global_init = 1;
2194         }
2195
2196         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2197                 ret = rte_dpaa_portal_init((void *)1);
2198                 if (ret) {
2199                         DPAA_PMD_ERR("Unable to initialize portal");
2200                         return ret;
2201                 }
2202         }
2203
2204         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
2205         if (!eth_dev)
2206                 return -ENOMEM;
2207
2208         eth_dev->data->dev_private =
2209                         rte_zmalloc("ethdev private structure",
2210                                         sizeof(struct dpaa_if),
2211                                         RTE_CACHE_LINE_SIZE);
2212         if (!eth_dev->data->dev_private) {
2213                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
2214                 rte_eth_dev_release_port(eth_dev);
2215                 return -ENOMEM;
2216         }
2217
2218         eth_dev->device = &dpaa_dev->device;
2219         dpaa_dev->eth_dev = eth_dev;
2220
2221         qman_ern_register_cb(dpaa_free_mbuf);
2222
2223         if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
2224                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2225
2226         /* Invoke PMD device initialization function */
2227         diag = dpaa_dev_init(eth_dev);
2228         if (diag == 0) {
2229                 rte_eth_dev_probing_finish(eth_dev);
2230                 return 0;
2231         }
2232
2233         rte_eth_dev_release_port(eth_dev);
2234         return diag;
2235 }
2236
2237 static int
2238 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
2239 {
2240         struct rte_eth_dev *eth_dev;
2241         int ret;
2242
2243         PMD_INIT_FUNC_TRACE();
2244
2245         eth_dev = dpaa_dev->eth_dev;
2246         dpaa_eth_dev_close(eth_dev);
2247         ret = rte_eth_dev_release_port(eth_dev);
2248
2249         return ret;
2250 }
2251
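/* Destructor: in FMCLESS mode the primary process tears down any FM
 * port configuration and VSP resources still programmed in hardware
 * when the application exits.
 */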
2252 static void __attribute__((destructor(102))) dpaa_finish(void)
2253 {
2254         /* For secondary, primary will do all the cleanup */
2255         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2256                 return;
2257
2258         if (!(default_q || fmc_q)) {
2259                 unsigned int i;
2260
2261                 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
2262                         if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
2263                                 struct rte_eth_dev *dev = &rte_eth_devices[i];
2264                                 struct dpaa_if *dpaa_intf =
2265                                         dev->data->dev_private;
2266                                 struct fman_if *fif =
2267                                         dev->process_private;
2268                                 if (dpaa_intf->port_handle)
2269                                         if (dpaa_fm_deconfig(dpaa_intf, fif))
2270                                                 DPAA_PMD_WARN("DPAA FM "
2271                                                         "deconfig failed\n");
2272                                 if (fif->num_profiles) {
2273                                         if (dpaa_port_vsp_cleanup(dpaa_intf,
2274                                                                   fif))
2275                                                 DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
2276                                 }
2277                         }
2278                 }
2279                 if (is_global_init)
2280                         if (dpaa_fm_term())
2281                                 DPAA_PMD_WARN("DPAA FM term failed\n");
2282
2283                 is_global_init = 0;
2284
2285                 DPAA_PMD_INFO("DPAA fman cleaned up");
2286         }
2287 }
2288
2289 static struct rte_dpaa_driver rte_dpaa_pmd = {
2290         .drv_flags = RTE_DPAA_DRV_INTR_LSC,
2291         .drv_type = FSL_DPAA_ETH,
2292         .probe = rte_dpaa_probe,
2293         .remove = rte_dpaa_remove,
2294 };
2295
2296 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
2297 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);