ethdev: fix max Rx packet length
[dpdk.git] drivers/net/dpaa/dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2020 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_string_fns.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_interrupts.h>
22 #include <rte_log.h>
23 #include <rte_debug.h>
24 #include <rte_pci.h>
25 #include <rte_atomic.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_memory.h>
28 #include <rte_tailq.h>
29 #include <rte_eal.h>
30 #include <rte_alarm.h>
31 #include <rte_ether.h>
32 #include <ethdev_driver.h>
33 #include <rte_malloc.h>
34 #include <rte_ring.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38 #include <dpaa_mempool.h>
39
40 #include <dpaa_ethdev.h>
41 #include <dpaa_rxtx.h>
42 #include <dpaa_flow.h>
43 #include <rte_pmd_dpaa.h>
44
45 #include <fsl_usd.h>
46 #include <fsl_qman.h>
47 #include <fsl_bman.h>
48 #include <fsl_fman.h>
49 #include <process.h>
50 #include <fmlib/fm_ext.h>
51
52 #define CHECK_INTERVAL         100  /* 100ms */
53 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
54
55 /* Supported Rx offloads */
56 static uint64_t dev_rx_offloads_sup =
57                 DEV_RX_OFFLOAD_JUMBO_FRAME |
58                 DEV_RX_OFFLOAD_SCATTER;
59
60 /* Rx offloads which cannot be disabled */
61 static uint64_t dev_rx_offloads_nodis =
62                 DEV_RX_OFFLOAD_IPV4_CKSUM |
63                 DEV_RX_OFFLOAD_UDP_CKSUM |
64                 DEV_RX_OFFLOAD_TCP_CKSUM |
65                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
66                 DEV_RX_OFFLOAD_RSS_HASH;
67
68 /* Supported Tx offloads */
69 static uint64_t dev_tx_offloads_sup =
70                 DEV_TX_OFFLOAD_MT_LOCKFREE |
71                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
72
73 /* Tx offloads which cannot be disabled */
74 static uint64_t dev_tx_offloads_nodis =
75                 DEV_TX_OFFLOAD_IPV4_CKSUM |
76                 DEV_TX_OFFLOAD_UDP_CKSUM |
77                 DEV_TX_OFFLOAD_TCP_CKSUM |
78                 DEV_TX_OFFLOAD_SCTP_CKSUM |
79                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
80                 DEV_TX_OFFLOAD_MULTI_SEGS;
81
82 /* Keep track of whether QMAN and BMAN have been globally initialized */
83 static int is_global_init;
84 static int fmc_q = 1;   /* Indicates the use of static fmc for distribution */
85 static int default_q;   /* use default queue - FMC is not executed*/
86 /* At present we only allow up to 4 push mode queues as default - as each of
87  * these queues needs a dedicated portal and we are short of portals.
88  */
89 #define DPAA_MAX_PUSH_MODE_QUEUE       8
90 #define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
91
92 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
93 static int dpaa_push_queue_idx; /* Index of the next queue to be set in push mode */
94
95
96 /* Per RX FQ Taildrop in frame count */
97 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
98
99 /* Per TX FQ Taildrop in frame count, disabled by default */
100 static unsigned int td_tx_threshold;
101
102 struct rte_dpaa_xstats_name_off {
103         char name[RTE_ETH_XSTATS_NAME_SIZE];
104         uint32_t offset;
105 };
106
107 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
108         {"rx_align_err",
109                 offsetof(struct dpaa_if_stats, raln)},
110         {"rx_valid_pause",
111                 offsetof(struct dpaa_if_stats, rxpf)},
112         {"rx_fcs_err",
113                 offsetof(struct dpaa_if_stats, rfcs)},
114         {"rx_vlan_frame",
115                 offsetof(struct dpaa_if_stats, rvlan)},
116         {"rx_frame_err",
117                 offsetof(struct dpaa_if_stats, rerr)},
118         {"rx_drop_err",
119                 offsetof(struct dpaa_if_stats, rdrp)},
120         {"rx_undersized",
121                 offsetof(struct dpaa_if_stats, rund)},
122         {"rx_oversize_err",
123                 offsetof(struct dpaa_if_stats, rovr)},
124         {"rx_fragment_pkt",
125                 offsetof(struct dpaa_if_stats, rfrg)},
126         {"tx_valid_pause",
127                 offsetof(struct dpaa_if_stats, txpf)},
128         {"tx_fcs_err",
129                 offsetof(struct dpaa_if_stats, terr)},
130         {"tx_vlan_frame",
131                 offsetof(struct dpaa_if_stats, tvlan)},
132         {"tx_undersized",
133                 offsetof(struct dpaa_if_stats, tund)},
134 };
135
136 static struct rte_dpaa_driver rte_dpaa_pmd;
137
138 static int
139 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
140
141 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
142                                 int wait_to_complete __rte_unused);
143
144 static void dpaa_interrupt_handler(void *param);
145
146 static inline void
147 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
148 {
149         memset(opts, 0, sizeof(struct qm_mcc_initfq));
150         opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
151         opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
152                            QM_FQCTRL_PREFERINCACHE;
153         opts->fqd.context_a.stashing.exclusive = 0;
154         if (dpaa_svr_family != SVR_LS1046A_FAMILY)
155                 opts->fqd.context_a.stashing.annotation_cl =
156                                                 DPAA_IF_RX_ANNOTATION_STASH;
157         opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
158         opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
159 }
160
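/*
 * MTU handling: the requested MTU is translated into a maximum frame size
 * (MTU + Ethernet header + CRC + one VLAN tag) and checked against the
 * mempool buffer size, or against buffer size * DPAA_SGT_MAX_ENTRIES when
 * scattered Rx is enabled, before being programmed into the FMan interface.
 */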
161 static int
162 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
163 {
164         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
165                                 + VLAN_TAG_SIZE;
166         uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
167
168         PMD_INIT_FUNC_TRACE();
169
170         if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
171                 return -EINVAL;
172         /*
173          * Refuse mtu that requires the support of scattered packets
174          * when this feature has not been enabled before.
175          */
176         if (dev->data->min_rx_buf_size &&
177                 !dev->data->scattered_rx && frame_size > buffsz) {
178                 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
179                 return -EINVAL;
180         }
181
182         /* check <seg size> * <max_seg>  >= max_frame */
183         if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
184                 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
185                 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
186                                 buffsz * DPAA_SGT_MAX_ENTRIES);
187                 return -EINVAL;
188         }
189
190         if (mtu > RTE_ETHER_MTU)
191                 dev->data->dev_conf.rxmode.offloads |=
192                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
193         else
194                 dev->data->dev_conf.rxmode.offloads &=
195                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
196
197         fman_if_set_maxfrm(dev->process_private, frame_size);
198
199         return 0;
200 }
201
202 static int
203 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
204 {
205         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
206         uint64_t rx_offloads = eth_conf->rxmode.offloads;
207         uint64_t tx_offloads = eth_conf->txmode.offloads;
208         struct rte_device *rdev = dev->device;
209         struct rte_eth_link *link = &dev->data->dev_link;
210         struct rte_dpaa_device *dpaa_dev;
211         struct fman_if *fif = dev->process_private;
212         struct __fman_if *__fif;
213         struct rte_intr_handle *intr_handle;
214         uint32_t max_rx_pktlen;
215         int speed, duplex;
216         int ret;
217
218         PMD_INIT_FUNC_TRACE();
219
220         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
221         intr_handle = &dpaa_dev->intr_handle;
222         __fif = container_of(fif, struct __fman_if, __if);
223
224         /* Rx offloads which are enabled by default */
225         if (dev_rx_offloads_nodis & ~rx_offloads) {
226                 DPAA_PMD_INFO(
227                 "Some of rx offloads enabled by default - requested 0x%" PRIx64
228                 " fixed are 0x%" PRIx64,
229                 rx_offloads, dev_rx_offloads_nodis);
230         }
231
232         /* Tx offloads which are enabled by default */
233         if (dev_tx_offloads_nodis & ~tx_offloads) {
234                 DPAA_PMD_INFO(
235                 "Some of tx offloads enabled by default - requested 0x%" PRIx64
236                 " fixed are 0x%" PRIx64,
237                 tx_offloads, dev_tx_offloads_nodis);
238         }
239
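        /*
         * rxmode.mtu excludes L2 overhead; derive the maximum Rx frame
         * length from it and clamp it to the hardware limit
         * (DPAA_MAX_RX_PKT_LEN) before programming the FMan interface.
         */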
240         max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
241                         RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
242         if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
243                 DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
244                         "supported is %d",
245                         max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
246                 max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
247         }
248
249         fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
250
251         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
252                 DPAA_PMD_DEBUG("enabling scatter mode");
253                 fman_if_set_sg(dev->process_private, 1);
254                 dev->data->scattered_rx = 1;
255         }
256
257         if (!(default_q || fmc_q)) {
258                 if (dpaa_fm_config(dev,
259                         eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
260                         dpaa_write_fm_config_to_file();
261                         DPAA_PMD_ERR("FM port configuration: Failed\n");
262                         return -1;
263                 }
264                 dpaa_write_fm_config_to_file();
265         }
266
267         /* if the interrupts were configured on this device */
268         if (intr_handle && intr_handle->fd) {
269                 if (dev->data->dev_conf.intr_conf.lsc != 0)
270                         rte_intr_callback_register(intr_handle,
271                                            dpaa_interrupt_handler,
272                                            (void *)dev);
273
274                 ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
275                 if (ret) {
276                         if (dev->data->dev_conf.intr_conf.lsc != 0) {
277                                 rte_intr_callback_unregister(intr_handle,
278                                         dpaa_interrupt_handler,
279                                         (void *)dev);
280                                 if (ret == EINVAL)
281                                         printf("Failed to enable interrupt: Not Supported\n");
282                                 else
283                                         printf("Failed to enable interrupt\n");
284                         }
285                         dev->data->dev_conf.intr_conf.lsc = 0;
286                         dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
287                 }
288         }
289
290         /* Wait for link status to get updated */
291         if (!link->link_status)
292                 sleep(1);
293
294         /* Configure link only if link is UP*/
295         if (link->link_status) {
296                 if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
297                         /* Start autoneg only if link is not in autoneg mode */
298                         if (!link->link_autoneg)
299                                 dpaa_restart_link_autoneg(__fif->node_name);
300                 } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
301                         switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
302                         case ETH_LINK_SPEED_10M_HD:
303                                 speed = ETH_SPEED_NUM_10M;
304                                 duplex = ETH_LINK_HALF_DUPLEX;
305                                 break;
306                         case ETH_LINK_SPEED_10M:
307                                 speed = ETH_SPEED_NUM_10M;
308                                 duplex = ETH_LINK_FULL_DUPLEX;
309                                 break;
310                         case ETH_LINK_SPEED_100M_HD:
311                                 speed = ETH_SPEED_NUM_100M;
312                                 duplex = ETH_LINK_HALF_DUPLEX;
313                                 break;
314                         case ETH_LINK_SPEED_100M:
315                                 speed = ETH_SPEED_NUM_100M;
316                                 duplex = ETH_LINK_FULL_DUPLEX;
317                                 break;
318                         case ETH_LINK_SPEED_1G:
319                                 speed = ETH_SPEED_NUM_1G;
320                                 duplex = ETH_LINK_FULL_DUPLEX;
321                                 break;
322                         case ETH_LINK_SPEED_2_5G:
323                                 speed = ETH_SPEED_NUM_2_5G;
324                                 duplex = ETH_LINK_FULL_DUPLEX;
325                                 break;
326                         case ETH_LINK_SPEED_10G:
327                                 speed = ETH_SPEED_NUM_10G;
328                                 duplex = ETH_LINK_FULL_DUPLEX;
329                                 break;
330                         default:
331                                 speed = ETH_SPEED_NUM_NONE;
332                                 duplex = ETH_LINK_FULL_DUPLEX;
333                                 break;
334                         }
335                         /* Set link speed */
336                         dpaa_update_link_speed(__fif->node_name, speed, duplex);
337                 } else {
338                         /* Manual autoneg - custom advertisement speed. */
339                         printf("Custom Advertisement speeds not supported\n");
340                 }
341         }
342
343         return 0;
344 }
345
346 static const uint32_t *
347 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
348 {
349         static const uint32_t ptypes[] = {
350                 RTE_PTYPE_L2_ETHER,
351                 RTE_PTYPE_L2_ETHER_VLAN,
352                 RTE_PTYPE_L2_ETHER_ARP,
353                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
354                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
355                 RTE_PTYPE_L4_ICMP,
356                 RTE_PTYPE_L4_TCP,
357                 RTE_PTYPE_L4_UDP,
358                 RTE_PTYPE_L4_FRAG,
359                 RTE_PTYPE_L4_TCP,
360                 RTE_PTYPE_L4_UDP,
361                 RTE_PTYPE_L4_SCTP
362         };
363
364         PMD_INIT_FUNC_TRACE();
365
366         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
367                 return ptypes;
368         return NULL;
369 }
370
371 static void dpaa_interrupt_handler(void *param)
372 {
373         struct rte_eth_dev *dev = param;
374         struct rte_device *rdev = dev->device;
375         struct rte_dpaa_device *dpaa_dev;
376         struct rte_intr_handle *intr_handle;
377         uint64_t buf;
378         int bytes_read;
379
380         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
381         intr_handle = &dpaa_dev->intr_handle;
382
383         bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
384         if (bytes_read < 0)
385                 DPAA_PMD_ERR("Error reading eventfd\n");
386         dpaa_eth_link_update(dev, 0);
387         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
388 }
389
390 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
391 {
392         struct dpaa_if *dpaa_intf = dev->data->dev_private;
393
394         PMD_INIT_FUNC_TRACE();
395
396         if (!(default_q || fmc_q))
397                 dpaa_write_fm_config_to_file();
398
399         /* Change tx callback to the real one */
400         if (dpaa_intf->cgr_tx)
401                 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
402         else
403                 dev->tx_pkt_burst = dpaa_eth_queue_tx;
404
405         fman_if_enable_rx(dev->process_private);
406
407         return 0;
408 }
409
410 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
411 {
412         struct fman_if *fif = dev->process_private;
413
414         PMD_INIT_FUNC_TRACE();
415         dev->data->dev_started = 0;
416
417         if (!fif->is_shared_mac)
418                 fman_if_disable_rx(fif);
419         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
420
421         return 0;
422 }
423
424 static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
425 {
426         struct fman_if *fif = dev->process_private;
427         struct __fman_if *__fif;
428         struct rte_device *rdev = dev->device;
429         struct rte_dpaa_device *dpaa_dev;
430         struct rte_intr_handle *intr_handle;
431         struct rte_eth_link *link = &dev->data->dev_link;
432         struct dpaa_if *dpaa_intf = dev->data->dev_private;
433         int loop;
434         int ret;
435
436         PMD_INIT_FUNC_TRACE();
437
438         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
439                 return 0;
440
441         if (!dpaa_intf) {
442                 DPAA_PMD_WARN("Already closed or not started");
443                 return -1;
444         }
445
446         /* DPAA FM deconfig */
447         if (!(default_q || fmc_q)) {
448                 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
449                         DPAA_PMD_WARN("DPAA FM deconfig failed\n");
450         }
451
452         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
453         intr_handle = &dpaa_dev->intr_handle;
454         __fif = container_of(fif, struct __fman_if, __if);
455
456         ret = dpaa_eth_dev_stop(dev);
457
458         /* Reset link to autoneg */
459         if (link->link_status && !link->link_autoneg)
460                 dpaa_restart_link_autoneg(__fif->node_name);
461
462         if (intr_handle && intr_handle->fd &&
463             dev->data->dev_conf.intr_conf.lsc != 0) {
464                 dpaa_intr_disable(__fif->node_name);
465                 rte_intr_callback_unregister(intr_handle,
466                                              dpaa_interrupt_handler,
467                                              (void *)dev);
468         }
469
470         /* release configuration memory */
471         if (dpaa_intf->fc_conf)
472                 rte_free(dpaa_intf->fc_conf);
473
474         /* Release RX congestion Groups */
475         if (dpaa_intf->cgr_rx) {
476                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
477                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
478         }
479
480         rte_free(dpaa_intf->cgr_rx);
481         dpaa_intf->cgr_rx = NULL;
482         /* Release TX congestion Groups */
483         if (dpaa_intf->cgr_tx) {
484                 for (loop = 0; loop < MAX_DPAA_CORES; loop++)
485                         qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
486                 rte_free(dpaa_intf->cgr_tx);
487                 dpaa_intf->cgr_tx = NULL;
488         }
489
490         rte_free(dpaa_intf->rx_queues);
491         dpaa_intf->rx_queues = NULL;
492
493         rte_free(dpaa_intf->tx_queues);
494         dpaa_intf->tx_queues = NULL;
495
496         return ret;
497 }
498
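/*
 * Firmware/version reporting: the SoC SVR value is read from
 * DPAA_SOC_ID_FILE and combined with the FMan IP revision into the
 * "SVR:%x-fman-v%x" string returned to the application.
 */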
499 static int
500 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
501                      char *fw_version,
502                      size_t fw_size)
503 {
504         int ret;
505         FILE *svr_file = NULL;
506         unsigned int svr_ver = 0;
507
508         PMD_INIT_FUNC_TRACE();
509
510         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
511         if (!svr_file) {
512                 DPAA_PMD_ERR("Unable to open SoC device");
513                 return -ENOTSUP; /* Not supported on this infra */
514         }
515         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
516                 dpaa_svr_family = svr_ver & SVR_MASK;
517         else
518                 DPAA_PMD_ERR("Unable to read SoC device");
519
520         fclose(svr_file);
521
522         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
523                        svr_ver, fman_ip_rev);
524         if (ret < 0)
525                 return -EINVAL;
526
527         ret += 1; /* add the size of '\0' */
528         if (fw_size < (size_t)ret)
529                 return ret;
530         else
531                 return 0;
532 }
533
534 static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
535                              struct rte_eth_dev_info *dev_info)
536 {
537         struct dpaa_if *dpaa_intf = dev->data->dev_private;
538         struct fman_if *fif = dev->process_private;
539
540         DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
541
542         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
543         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
544         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
545         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
546         dev_info->max_hash_mac_addrs = 0;
547         dev_info->max_vfs = 0;
548         dev_info->max_vmdq_pools = ETH_16_POOLS;
549         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
550
551         if (fif->mac_type == fman_mac_1g) {
552                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
553                                         | ETH_LINK_SPEED_10M
554                                         | ETH_LINK_SPEED_100M_HD
555                                         | ETH_LINK_SPEED_100M
556                                         | ETH_LINK_SPEED_1G;
557         } else if (fif->mac_type == fman_mac_2_5g) {
558                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
559                                         | ETH_LINK_SPEED_10M
560                                         | ETH_LINK_SPEED_100M_HD
561                                         | ETH_LINK_SPEED_100M
562                                         | ETH_LINK_SPEED_1G
563                                         | ETH_LINK_SPEED_2_5G;
564         } else if (fif->mac_type == fman_mac_10g) {
565                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
566                                         | ETH_LINK_SPEED_10M
567                                         | ETH_LINK_SPEED_100M_HD
568                                         | ETH_LINK_SPEED_100M
569                                         | ETH_LINK_SPEED_1G
570                                         | ETH_LINK_SPEED_2_5G
571                                         | ETH_LINK_SPEED_10G;
572         } else {
573                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
574                              dpaa_intf->name, fif->mac_type);
575                 return -EINVAL;
576         }
577
578         dev_info->rx_offload_capa = dev_rx_offloads_sup |
579                                         dev_rx_offloads_nodis;
580         dev_info->tx_offload_capa = dev_tx_offloads_sup |
581                                         dev_tx_offloads_nodis;
582         dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
583         dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
584         dev_info->default_rxportconf.nb_queues = 1;
585         dev_info->default_txportconf.nb_queues = 1;
586         dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
587         dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
588
589         return 0;
590 }
591
592 static int
593 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
594                         __rte_unused uint16_t queue_id,
595                         struct rte_eth_burst_mode *mode)
596 {
597         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
598         int ret = -EINVAL;
599         unsigned int i;
600         const struct burst_info {
601                 uint64_t flags;
602                 const char *output;
603         } rx_offload_map[] = {
604                         {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
605                         {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
606                         {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
607                         {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
608                         {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
609                         {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
610                         {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
611         };
612
613         /* Update Rx offload info */
614         for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
615                 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
616                         snprintf(mode->info, sizeof(mode->info), "%s",
617                                 rx_offload_map[i].output);
618                         ret = 0;
619                         break;
620                 }
621         }
622         return ret;
623 }
624
625 static int
626 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
627                         __rte_unused uint16_t queue_id,
628                         struct rte_eth_burst_mode *mode)
629 {
630         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
631         int ret = -EINVAL;
632         unsigned int i;
633         const struct burst_info {
634                 uint64_t flags;
635                 const char *output;
636         } tx_offload_map[] = {
637                         {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
638                         {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
639                         {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
640                         {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
641                         {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
642                         {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
643                         {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
644                         {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
645         };
646
647         /* Update Tx offload info */
648         for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
649                 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
650                         snprintf(mode->info, sizeof(mode->info), "%s",
651                                 tx_offload_map[i].output);
652                         ret = 0;
653                         break;
654                 }
655         }
656         return ret;
657 }
658
659 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
660                                 int wait_to_complete)
661 {
662         struct dpaa_if *dpaa_intf = dev->data->dev_private;
663         struct rte_eth_link *link = &dev->data->dev_link;
664         struct fman_if *fif = dev->process_private;
665         struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
666         int ret, ioctl_version;
667         uint8_t count;
668
669         PMD_INIT_FUNC_TRACE();
670
671         ioctl_version = dpaa_get_ioctl_version_number();
672
673         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
674                 for (count = 0; count <= MAX_REPEAT_TIME; count++) {
675                         ret = dpaa_get_link_status(__fif->node_name, link);
676                         if (ret)
677                                 return ret;
678                         if (link->link_status == ETH_LINK_DOWN &&
679                             wait_to_complete)
680                                 rte_delay_ms(CHECK_INTERVAL);
681                         else
682                                 break;
683                 }
684         } else {
685                 link->link_status = dpaa_intf->valid;
686         }
687
688         if (ioctl_version < 2) {
689                 link->link_duplex = ETH_LINK_FULL_DUPLEX;
690                 link->link_autoneg = ETH_LINK_AUTONEG;
691
692                 if (fif->mac_type == fman_mac_1g)
693                         link->link_speed = ETH_SPEED_NUM_1G;
694                 else if (fif->mac_type == fman_mac_2_5g)
695                         link->link_speed = ETH_SPEED_NUM_2_5G;
696                 else if (fif->mac_type == fman_mac_10g)
697                         link->link_speed = ETH_SPEED_NUM_10G;
698                 else
699                         DPAA_PMD_ERR("invalid link_speed: %s, %d",
700                                      dpaa_intf->name, fif->mac_type);
701         }
702
703         DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
704                       link->link_status ? "Up" : "Down");
705         return 0;
706 }
707
708 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
709                                struct rte_eth_stats *stats)
710 {
711         PMD_INIT_FUNC_TRACE();
712
713         fman_if_stats_get(dev->process_private, stats);
714         return 0;
715 }
716
717 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
718 {
719         PMD_INIT_FUNC_TRACE();
720
721         fman_if_stats_reset(dev->process_private);
722
723         return 0;
724 }
725
726 static int
727 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
728                     unsigned int n)
729 {
730         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
731         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
732
733         if (n < num)
734                 return num;
735
736         if (xstats == NULL)
737                 return 0;
738
739         fman_if_stats_get_all(dev->process_private, values,
740                               sizeof(struct dpaa_if_stats) / 8);
741
742         for (i = 0; i < num; i++) {
743                 xstats[i].id = i;
744                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
745         }
746         return i;
747 }
748
749 static int
750 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
751                       struct rte_eth_xstat_name *xstats_names,
752                       unsigned int limit)
753 {
754         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
755
756         if (limit < stat_cnt)
757                 return stat_cnt;
758
759         if (xstats_names != NULL)
760                 for (i = 0; i < stat_cnt; i++)
761                         strlcpy(xstats_names[i].name,
762                                 dpaa_xstats_strings[i].name,
763                                 sizeof(xstats_names[i].name));
764
765         return stat_cnt;
766 }
767
768 static int
769 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
770                       uint64_t *values, unsigned int n)
771 {
772         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
773         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
774
775         if (!ids) {
776                 if (n < stat_cnt)
777                         return stat_cnt;
778
779                 if (!values)
780                         return 0;
781
782                 fman_if_stats_get_all(dev->process_private, values_copy,
783                                       sizeof(struct dpaa_if_stats) / 8);
784
785                 for (i = 0; i < stat_cnt; i++)
786                         values[i] =
787                                 values_copy[dpaa_xstats_strings[i].offset / 8];
788
789                 return stat_cnt;
790         }
791
792         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
793
794         for (i = 0; i < n; i++) {
795                 if (ids[i] >= stat_cnt) {
796                         DPAA_PMD_ERR("id value isn't valid");
797                         return -1;
798                 }
799                 values[i] = values_copy[ids[i]];
800         }
801         return n;
802 }
803
804 static int
805 dpaa_xstats_get_names_by_id(
806         struct rte_eth_dev *dev,
807         const uint64_t *ids,
808         struct rte_eth_xstat_name *xstats_names,
809         unsigned int limit)
810 {
811         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
812         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
813
814         if (!ids)
815                 return dpaa_xstats_get_names(dev, xstats_names, limit);
816
817         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
818
819         for (i = 0; i < limit; i++) {
820                 if (ids[i] >= stat_cnt) {
821                         DPAA_PMD_ERR("id value isn't valid");
822                         return -1;
823                 }
824                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
825         }
826         return limit;
827 }
828
829 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
830 {
831         PMD_INIT_FUNC_TRACE();
832
833         fman_if_promiscuous_enable(dev->process_private);
834
835         return 0;
836 }
837
838 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
839 {
840         PMD_INIT_FUNC_TRACE();
841
842         fman_if_promiscuous_disable(dev->process_private);
843
844         return 0;
845 }
846
847 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
848 {
849         PMD_INIT_FUNC_TRACE();
850
851         fman_if_set_mcast_filter_table(dev->process_private);
852
853         return 0;
854 }
855
856 static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
857 {
858         PMD_INIT_FUNC_TRACE();
859
860         fman_if_reset_mcast_filter_table(dev->process_private);
861
862         return 0;
863 }
864
865 static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
866 {
867         struct dpaa_if *dpaa_intf = dev->data->dev_private;
868         struct fman_if_ic_params icp;
869         uint32_t fd_offset;
870         uint32_t bp_size;
871
872         memset(&icp, 0, sizeof(icp));
873         /* set ICEOF to the default value, which is 0 */
874         icp.iciof = DEFAULT_ICIOF;
875         icp.iceof = DEFAULT_RX_ICEOF;
876         icp.icsz = DEFAULT_ICSZ;
877         fman_if_set_ic_params(dev->process_private, &icp);
878
879         fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
880         fman_if_set_fdoff(dev->process_private, fd_offset);
881
882         /* Buffer pool size should be equal to Dataroom Size*/
883         bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
884
885         fman_if_set_bp(dev->process_private,
886                        dpaa_intf->bp_info->mp->size,
887                        dpaa_intf->bp_info->bpid, bp_size);
888 }
889
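/*
 * With VSP profiles enabled, every Rx queue mapped to the same VSP must use
 * the same buffer pool; reject a setup that would associate a second
 * mempool (bpid) with an already-assigned VSP.
 */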
890 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
891                                              int8_t vsp_id, uint32_t bpid)
892 {
893         struct dpaa_if *dpaa_intf = dev->data->dev_private;
894         struct fman_if *fif = dev->process_private;
895
896         if (fif->num_profiles) {
897                 if (vsp_id < 0)
898                         vsp_id = fif->base_profile_id;
899         } else {
900                 if (vsp_id < 0)
901                         vsp_id = 0;
902         }
903
904         if (dpaa_intf->vsp_bpid[vsp_id] &&
905                 bpid != dpaa_intf->vsp_bpid[vsp_id]) {
906                 DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
907
908                 return -1;
909         }
910
911         return 0;
912 }
913
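/*
 * Rx queue setup: validates the mempool buffer size against the maximum Rx
 * frame length (single buffer, or buffer size * DPAA_SGT_MAX_ENTRIES with
 * scatter enabled), binds the buffer pool/VSP, and, while dedicated portals
 * remain, places the first dpaa_push_mode_max_queue queues in push (static
 * dequeue) mode with optional CGR-based tail drop sized to nb_desc.
 */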
914 static
915 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
916                             uint16_t nb_desc,
917                             unsigned int socket_id __rte_unused,
918                             const struct rte_eth_rxconf *rx_conf,
919                             struct rte_mempool *mp)
920 {
921         struct dpaa_if *dpaa_intf = dev->data->dev_private;
922         struct fman_if *fif = dev->process_private;
923         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
924         struct qm_mcc_initfq opts = {0};
925         u32 flags = 0;
926         int ret;
927         u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
928         uint32_t max_rx_pktlen;
929
930         PMD_INIT_FUNC_TRACE();
931
932         if (queue_idx >= dev->data->nb_rx_queues) {
933                 rte_errno = EOVERFLOW;
934                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
935                       (void *)dev, queue_idx, dev->data->nb_rx_queues);
936                 return -rte_errno;
937         }
938
939         /* Rx deferred start is not supported */
940         if (rx_conf->rx_deferred_start) {
941                 DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
942                 return -EINVAL;
943         }
944         rxq->nb_desc = UINT16_MAX;
945         rxq->offloads = rx_conf->offloads;
946
947         DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
948                         queue_idx, rxq->fqid);
949
950         if (!fif->num_profiles) {
951                 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
952                         dpaa_intf->bp_info->mp != mp) {
953                         DPAA_PMD_WARN("Multiple pools on same interface not"
954                                       " supported");
955                         return -EINVAL;
956                 }
957         } else {
958                 if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
959                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
960                         return -EINVAL;
961                 }
962         }
963
964         if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
965             dpaa_intf->bp_info->mp != mp) {
966                 DPAA_PMD_WARN("Multiple pools on same interface not supported");
967                 return -EINVAL;
968         }
969
970         max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
971                 VLAN_TAG_SIZE;
972         /* Max packet can fit in single buffer */
973         if (max_rx_pktlen <= buffsz) {
974                 ;
975         } else if (dev->data->dev_conf.rxmode.offloads &
976                         DEV_RX_OFFLOAD_SCATTER) {
977                 if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
978                         DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
979                                 "MaxSGlist %d",
980                                 max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
981                         rte_errno = EOVERFLOW;
982                         return -rte_errno;
983                 }
984         } else {
985                 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
986                      " larger than a single mbuf (%u) and scattered"
987                      " mode has not been requested",
988                      max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
989         }
990
991         dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
992
993         /* For shared interface, it's done in kernel, skip.*/
994         if (!fif->is_shared_mac)
995                 dpaa_fman_if_pool_setup(dev);
996
997         if (fif->num_profiles) {
998                 int8_t vsp_id = rxq->vsp_id;
999
1000                 if (vsp_id >= 0) {
1001                         ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
1002                                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
1003                                         fif);
1004                         if (ret) {
1005                                 DPAA_PMD_ERR("dpaa_port_vsp_update failed");
1006                                 return ret;
1007                         }
1008                 } else {
1009                         DPAA_PMD_INFO("Base profile is associated to"
1010                                 " RXQ fqid:%d\r\n", rxq->fqid);
1011                         if (fif->is_shared_mac) {
1012                                 DPAA_PMD_ERR("Fatal: Base profile is associated"
1013                                              " to shared interface on DPDK.");
1014                                 return -EINVAL;
1015                         }
1016                         dpaa_intf->vsp_bpid[fif->base_profile_id] =
1017                                 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1018                 }
1019         } else {
1020                 dpaa_intf->vsp_bpid[0] =
1021                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1022         }
1023
1024         dpaa_intf->valid = 1;
1025         DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
1026                 fman_if_get_sg_enable(fif), max_rx_pktlen);
1027         /* checking if push mode only, no error check for now */
1028         if (!rxq->is_static &&
1029             dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
1030                 struct qman_portal *qp;
1031                 int q_fd;
1032
1033                 dpaa_push_queue_idx++;
1034                 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
1035                 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
1036                                    QM_FQCTRL_CTXASTASHING |
1037                                    QM_FQCTRL_PREFERINCACHE;
1038                 opts.fqd.context_a.stashing.exclusive = 0;
1039                 /* In a multicore scenario stashing becomes a bottleneck on LS1046.
1040                  * So do not enable stashing in this case
1041                  */
1042                 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
1043                         opts.fqd.context_a.stashing.annotation_cl =
1044                                                 DPAA_IF_RX_ANNOTATION_STASH;
1045                 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
1046                 opts.fqd.context_a.stashing.context_cl =
1047                                                 DPAA_IF_RX_CONTEXT_STASH;
1048
1049                 /*Create a channel and associate given queue with the channel*/
1050                 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
1051                 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1052                 opts.fqd.dest.channel = rxq->ch_id;
1053                 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
1054                 flags = QMAN_INITFQ_FLAG_SCHED;
1055
1056                 /* Configure tail drop */
1057                 if (dpaa_intf->cgr_rx) {
1058                         opts.we_mask |= QM_INITFQ_WE_CGID;
1059                         opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
1060                         opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1061                 }
1062                 ret = qman_init_fq(rxq, flags, &opts);
1063                 if (ret) {
1064                         DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
1065                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1066                         return ret;
1067                 }
1068                 if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
1069                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
1070                 } else {
1071                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
1072                         rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
1073                 }
1074
1075                 rxq->is_static = true;
1076
1077                 /* Allocate qman specific portals */
1078                 qp = fsl_qman_fq_portal_create(&q_fd);
1079                 if (!qp) {
1080                         DPAA_PMD_ERR("Unable to alloc fq portal");
1081                         return -1;
1082                 }
1083                 rxq->qp = qp;
1084
1085                 /* Set up the device interrupt handler */
1086                 if (!dev->intr_handle) {
1087                         struct rte_dpaa_device *dpaa_dev;
1088                         struct rte_device *rdev = dev->device;
1089
1090                         dpaa_dev = container_of(rdev, struct rte_dpaa_device,
1091                                                 device);
1092                         dev->intr_handle = &dpaa_dev->intr_handle;
1093                         dev->intr_handle->intr_vec = rte_zmalloc(NULL,
1094                                         dpaa_push_mode_max_queue, 0);
1095                         if (!dev->intr_handle->intr_vec) {
1096                                 DPAA_PMD_ERR("intr_vec alloc failed");
1097                                 return -ENOMEM;
1098                         }
1099                         dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
1100                         dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
1101                 }
1102
1103                 dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
1104                 dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
1105                 dev->intr_handle->efds[queue_idx] = q_fd;
1106                 rxq->q_fd = q_fd;
1107         }
1108         rxq->bp_array = rte_dpaa_bpid_info;
1109         dev->data->rx_queues[queue_idx] = rxq;
1110
1111         /* configure the CGR size as per the desc size */
1112         if (dpaa_intf->cgr_rx) {
1113                 struct qm_mcc_initcgr cgr_opts = {0};
1114
1115                 rxq->nb_desc = nb_desc;
1116                 /* Enable tail drop with cgr on this queue */
1117                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
1118                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
1119                 if (ret) {
1120                         DPAA_PMD_WARN(
1121                                 "rx taildrop modify fail on fqid %d (ret=%d)",
1122                                 rxq->fqid, ret);
1123                 }
1124         }
1125         /* Enable main queue to receive error packets also by default */
1126         fman_if_set_err_fqid(fif, rxq->fqid);
1127         return 0;
1128 }
1129
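/*
 * Event queue attach: re-initializes the Rx FQ so it is scheduled to the
 * given eventdev channel. RTE_SCHED_TYPE_ATOMIC uses HOLDACTIVE with the
 * atomic dequeue callback, ordered scheduling is rejected, and any other
 * type is handled as parallel.
 */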
1130 int
1131 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
1132                 int eth_rx_queue_id,
1133                 u16 ch_id,
1134                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1135 {
1136         int ret;
1137         u32 flags = 0;
1138         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1139         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1140         struct qm_mcc_initfq opts = {0};
1141
1142         if (dpaa_push_mode_max_queue)
1143                 DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
1144                               "PUSH mode already enabled for first %d queues.\n"
1145                               "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
1146                               dpaa_push_mode_max_queue);
1147
1148         dpaa_poll_queue_default_config(&opts);
1149
1150         switch (queue_conf->ev.sched_type) {
1151         case RTE_SCHED_TYPE_ATOMIC:
1152                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
1153                 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
1154                  * configuration with HOLD_ACTIVE setting
1155                  */
1156                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
1157                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
1158                 break;
1159         case RTE_SCHED_TYPE_ORDERED:
1160                 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
1161                 return -1;
1162         default:
1163                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
1164                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
1165                 break;
1166         }
1167
1168         opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1169         opts.fqd.dest.channel = ch_id;
1170         opts.fqd.dest.wq = queue_conf->ev.priority;
1171
1172         if (dpaa_intf->cgr_rx) {
1173                 opts.we_mask |= QM_INITFQ_WE_CGID;
1174                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1175                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1176         }
1177
1178         flags = QMAN_INITFQ_FLAG_SCHED;
1179
1180         ret = qman_init_fq(rxq, flags, &opts);
1181         if (ret) {
1182                 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
1183                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1184                 return ret;
1185         }
1186
1187         /* copy configuration which needs to be filled during dequeue */
1188         memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
1189         dev->data->rx_queues[eth_rx_queue_id] = rxq;
1190
1191         return ret;
1192 }
1193
1194 int
1195 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
1196                 int eth_rx_queue_id)
1197 {
1198         struct qm_mcc_initfq opts;
1199         int ret;
1200         u32 flags = 0;
1201         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1202         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1203
1204         dpaa_poll_queue_default_config(&opts);
1205
1206         if (dpaa_intf->cgr_rx) {
1207                 opts.we_mask |= QM_INITFQ_WE_CGID;
1208                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1209                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1210         }
1211
1212         ret = qman_init_fq(rxq, flags, &opts);
1213         if (ret) {
1214                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
1215                              rxq->fqid, ret);
1216         }
1217
1218         rxq->cb.dqrr_dpdk_cb = NULL;
1219         dev->data->rx_queues[eth_rx_queue_id] = NULL;
1220
1221         return 0;
1222 }
1223
1224 static
1225 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1226                             uint16_t nb_desc __rte_unused,
1227                 unsigned int socket_id __rte_unused,
1228                 const struct rte_eth_txconf *tx_conf)
1229 {
1230         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1231         struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
1232
1233         PMD_INIT_FUNC_TRACE();
1234
1235         /* Tx deferred start is not supported */
1236         if (tx_conf->tx_deferred_start) {
1237                 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1238                 return -EINVAL;
1239         }
1240         txq->nb_desc = UINT16_MAX;
1241         txq->offloads = tx_conf->offloads;
1242
1243         if (queue_idx >= dev->data->nb_tx_queues) {
1244                 rte_errno = EOVERFLOW;
1245                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
1246                       (void *)dev, queue_idx, dev->data->nb_tx_queues);
1247                 return -rte_errno;
1248         }
1249
1250         DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
1251                         queue_idx, txq->fqid);
1252         dev->data->tx_queues[queue_idx] = txq;
1253
1254         return 0;
1255 }
1256
1257 static uint32_t
1258 dpaa_dev_rx_queue_count(void *rx_queue)
1259 {
1260         struct qman_fq *rxq = rx_queue;
1261         u32 frm_cnt = 0;
1262
1263         PMD_INIT_FUNC_TRACE();
1264
1265         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
1266                 DPAA_PMD_DEBUG("RX frame count for q(%p) is %u",
1267                                rx_queue, frm_cnt);
1268         }
1269         return frm_cnt;
1270 }
1271
1272 static int dpaa_link_down(struct rte_eth_dev *dev)
1273 {
1274         struct fman_if *fif = dev->process_private;
1275         struct __fman_if *__fif;
1276
1277         PMD_INIT_FUNC_TRACE();
1278
1279         __fif = container_of(fif, struct __fman_if, __if);
1280
1281         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1282                 dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
1283         else
1284                 return dpaa_eth_dev_stop(dev);
1285         return 0;
1286 }
1287
1288 static int dpaa_link_up(struct rte_eth_dev *dev)
1289 {
1290         struct fman_if *fif = dev->process_private;
1291         struct __fman_if *__fif;
1292
1293         PMD_INIT_FUNC_TRACE();
1294
1295         __fif = container_of(fif, struct __fman_if, __if);
1296
1297         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1298                 dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
1299         else
1300                 dpaa_eth_dev_start(dev);
1301         return 0;
1302 }
1303
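/*
 * Flow control set: only Tx pause (RTE_FC_TX_PAUSE/RTE_FC_FULL) takes
 * effect; the high/low watermarks and pause quanta are programmed into the
 * FMan interface and the settings are cached in dpaa_intf->fc_conf so that
 * dpaa_flow_ctrl_get() can report them back.
 */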
1304 static int
1305 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
1306                    struct rte_eth_fc_conf *fc_conf)
1307 {
1308         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1309         struct rte_eth_fc_conf *net_fc;
1310
1311         PMD_INIT_FUNC_TRACE();
1312
1313         if (!(dpaa_intf->fc_conf)) {
1314                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1315                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1316                 if (!dpaa_intf->fc_conf) {
1317                         DPAA_PMD_ERR("unable to save flow control info");
1318                         return -ENOMEM;
1319                 }
1320         }
1321         net_fc = dpaa_intf->fc_conf;
1322
1323         if (fc_conf->high_water < fc_conf->low_water) {
1324                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
1325                 return -EINVAL;
1326         }
1327
1328         if (fc_conf->mode == RTE_FC_NONE) {
1329                 return 0;
1330         } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
1331                  fc_conf->mode == RTE_FC_FULL) {
1332                 fman_if_set_fc_threshold(dev->process_private,
1333                                          fc_conf->high_water,
1334                                          fc_conf->low_water,
1335                                          dpaa_intf->bp_info->bpid);
1336                 if (fc_conf->pause_time)
1337                         fman_if_set_fc_quanta(dev->process_private,
1338                                               fc_conf->pause_time);
1339         }
1340
1341         /* Save the information in dpaa device */
1342         net_fc->pause_time = fc_conf->pause_time;
1343         net_fc->high_water = fc_conf->high_water;
1344         net_fc->low_water = fc_conf->low_water;
1345         net_fc->send_xon = fc_conf->send_xon;
1346         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
1347         net_fc->mode = fc_conf->mode;
1348         net_fc->autoneg = fc_conf->autoneg;
1349
1350         return 0;
1351 }
1352
1353 static int
1354 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
1355                    struct rte_eth_fc_conf *fc_conf)
1356 {
1357         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1358         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
1359         int ret;
1360
1361         PMD_INIT_FUNC_TRACE();
1362
1363         if (net_fc) {
1364                 fc_conf->pause_time = net_fc->pause_time;
1365                 fc_conf->high_water = net_fc->high_water;
1366                 fc_conf->low_water = net_fc->low_water;
1367                 fc_conf->send_xon = net_fc->send_xon;
1368                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
1369                 fc_conf->mode = net_fc->mode;
1370                 fc_conf->autoneg = net_fc->autoneg;
1371                 return 0;
1372         }
1373         ret = fman_if_get_fc_threshold(dev->process_private);
1374         if (ret) {
1375                 fc_conf->mode = RTE_FC_TX_PAUSE;
1376                 fc_conf->pause_time =
1377                         fman_if_get_fc_quanta(dev->process_private);
1378         } else {
1379                 fc_conf->mode = RTE_FC_NONE;
1380         }
1381
1382         return 0;
1383 }
1384
1385 static int
1386 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
1387                              struct rte_ether_addr *addr,
1388                              uint32_t index,
1389                              __rte_unused uint32_t pool)
1390 {
1391         int ret;
1392
1393         PMD_INIT_FUNC_TRACE();
1394
1395         ret = fman_if_add_mac_addr(dev->process_private,
1396                                    addr->addr_bytes, index);
1397
1398         if (ret)
1399                 DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
1400         return 0;
1401 }
1402
1403 static void
1404 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
1405                           uint32_t index)
1406 {
1407         PMD_INIT_FUNC_TRACE();
1408
1409         fman_if_clear_mac_addr(dev->process_private, index);
1410 }
1411
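/* Set the primary MAC address (filter index 0) on the FMan interface. */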
1412 static int
1413 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
1414                        struct rte_ether_addr *addr)
1415 {
1416         int ret;
1417
1418         PMD_INIT_FUNC_TRACE();
1419
1420         ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
1421         if (ret)
1422                 DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1423
1424         return ret;
1425 }
1426
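/* Update the RSS hash fields. Supported only in FMCLESS mode (neither the
 * default queue nor a static FMC configuration is in use), where the FM
 * port PCD is reprogrammed at run time through dpaa_fm_config().
 */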
1427 static int
1428 dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1429                          struct rte_eth_rss_conf *rss_conf)
1430 {
1431         struct rte_eth_dev_data *data = dev->data;
1432         struct rte_eth_conf *eth_conf = &data->dev_conf;
1433
1434         PMD_INIT_FUNC_TRACE();
1435
1436         if (!(default_q || fmc_q)) {
1437                 if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
1438                         DPAA_PMD_ERR("FM port configuration failed\n");
1439                         return -1;
1440                 }
1441                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1442         } else {
1443                 DPAA_PMD_ERR("Function not supported\n");
1444                 return -ENOTSUP;
1445         }
1446         return 0;
1447 }
1448
1449 static int
1450 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1451                            struct rte_eth_rss_conf *rss_conf)
1452 {
1453         struct rte_eth_dev_data *data = dev->data;
1454         struct rte_eth_conf *eth_conf = &data->dev_conf;
1455
1456         /* DPAA does not support an RSS key, so the reported length is 0 */
1457         rss_conf->rss_key_len = 0;
1458         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1459         return 0;
1460 }
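/* Rx queue interrupt control. Only queues in push (static) mode, which own
 * a dedicated QMan portal, can be armed: enabling adds the DQRI interrupt
 * source, disabling removes all sources and drains the pending interrupt
 * count from the portal file descriptor.
 */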
1461
1462 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
1463                                       uint16_t queue_id)
1464 {
1465         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1466         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1467
1468         if (!rxq->is_static)
1469                 return -EINVAL;
1470
1471         return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
1472 }
1473
1474 static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
1475                                        uint16_t queue_id)
1476 {
1477         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1478         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1479         uint32_t temp;
1480         ssize_t temp1;
1481
1482         if (!rxq->is_static)
1483                 return -EINVAL;
1484
1485         qman_fq_portal_irqsource_remove(rxq->qp, ~0);
1486
1487         temp1 = read(rxq->q_fd, &temp, sizeof(temp));
1488         if (temp1 != sizeof(temp))
1489                 DPAA_PMD_ERR("irq read error");
1490
1491         qman_fq_portal_thread_irq(rxq->qp);
1492
1493         return 0;
1494 }
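/* Fill per-queue information reported through rte_eth_rx_queue_info_get()
 * and rte_eth_tx_queue_info_get().
 */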
1495
1496 static void
1497 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1498         struct rte_eth_rxq_info *qinfo)
1499 {
1500         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1501         struct qman_fq *rxq;
1502         int ret;
1503
1504         rxq = dev->data->rx_queues[queue_id];
1505
1506         qinfo->mp = dpaa_intf->bp_info->mp;
1507         qinfo->scattered_rx = dev->data->scattered_rx;
1508         qinfo->nb_desc = rxq->nb_desc;
1509
1510         /* Report the HW Rx buffer length to user */
1511         ret = fman_if_get_maxfrm(dev->process_private);
1512         if (ret > 0)
1513                 qinfo->rx_buf_size = ret;
1514
1515         qinfo->conf.rx_free_thresh = 1;
1516         qinfo->conf.rx_drop_en = 1;
1517         qinfo->conf.rx_deferred_start = 0;
1518         qinfo->conf.offloads = rxq->offloads;
1519 }
1520
1521 static void
1522 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1523         struct rte_eth_txq_info *qinfo)
1524 {
1525         struct qman_fq *txq;
1526
1527         txq = dev->data->tx_queues[queue_id];
1528
1529         qinfo->nb_desc = txq->nb_desc;
1530         qinfo->conf.tx_thresh.pthresh = 0;
1531         qinfo->conf.tx_thresh.hthresh = 0;
1532         qinfo->conf.tx_thresh.wthresh = 0;
1533
1534         qinfo->conf.tx_free_thresh = 0;
1535         qinfo->conf.tx_rs_thresh = 0;
1536         qinfo->conf.offloads = txq->offloads;
1537         qinfo->conf.tx_deferred_start = 0;
1538 }
1539
1540 static struct eth_dev_ops dpaa_devops = {
1541         .dev_configure            = dpaa_eth_dev_configure,
1542         .dev_start                = dpaa_eth_dev_start,
1543         .dev_stop                 = dpaa_eth_dev_stop,
1544         .dev_close                = dpaa_eth_dev_close,
1545         .dev_infos_get            = dpaa_eth_dev_info,
1546         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
1547
1548         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
1549         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
1550         .rx_burst_mode_get        = dpaa_dev_rx_burst_mode_get,
1551         .tx_burst_mode_get        = dpaa_dev_tx_burst_mode_get,
1552         .rxq_info_get             = dpaa_rxq_info_get,
1553         .txq_info_get             = dpaa_txq_info_get,
1554
1555         .flow_ctrl_get            = dpaa_flow_ctrl_get,
1556         .flow_ctrl_set            = dpaa_flow_ctrl_set,
1557
1558         .link_update              = dpaa_eth_link_update,
1559         .stats_get                = dpaa_eth_stats_get,
1560         .xstats_get               = dpaa_dev_xstats_get,
1561         .xstats_get_by_id         = dpaa_xstats_get_by_id,
1562         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
1563         .xstats_get_names         = dpaa_xstats_get_names,
1564         .xstats_reset             = dpaa_eth_stats_reset,
1565         .stats_reset              = dpaa_eth_stats_reset,
1566         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
1567         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
1568         .allmulticast_enable      = dpaa_eth_multicast_enable,
1569         .allmulticast_disable     = dpaa_eth_multicast_disable,
1570         .mtu_set                  = dpaa_mtu_set,
1571         .dev_set_link_down        = dpaa_link_down,
1572         .dev_set_link_up          = dpaa_link_up,
1573         .mac_addr_add             = dpaa_dev_add_mac_addr,
1574         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
1575         .mac_addr_set             = dpaa_dev_set_mac_addr,
1576
1577         .fw_version_get           = dpaa_fw_version_get,
1578
1579         .rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
1580         .rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
1581         .rss_hash_update          = dpaa_dev_rss_hash_update,
1582         .rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
1583 };
1584
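/* Check whether the ethdev is bound to this DPAA PMD (by driver name). */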
1585 static bool
1586 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1587 {
1588         if (strcmp(dev->device->driver->name,
1589                    drv->driver.name))
1590                 return false;
1591
1592         return true;
1593 }
1594
1595 static bool
1596 is_dpaa_supported(struct rte_eth_dev *dev)
1597 {
1598         return is_device_supported(dev, &rte_dpaa_pmd);
1599 }
1600
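/* PMD-specific API: enable or disable MAC loopback on the given port. */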
1601 int
1602 rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
1603 {
1604         struct rte_eth_dev *dev;
1605
1606         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1607
1608         dev = &rte_eth_devices[port];
1609
1610         if (!is_dpaa_supported(dev))
1611                 return -ENOTSUP;
1612
1613         if (on)
1614                 fman_if_loopback_enable(dev->process_private);
1615         else
1616                 fman_if_loopback_disable(dev->process_private);
1617
1618         return 0;
1619 }
1620
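/* Record the initial flow control state of the interface so that
 * dpaa_flow_ctrl_get() can report it before any explicit configuration.
 */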
1621 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
1622                                struct fman_if *fman_intf)
1623 {
1624         struct rte_eth_fc_conf *fc_conf;
1625         int ret;
1626
1627         PMD_INIT_FUNC_TRACE();
1628
1629         if (!(dpaa_intf->fc_conf)) {
1630                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1631                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1632                 if (!dpaa_intf->fc_conf) {
1633                         DPAA_PMD_ERR("unable to save flow control info");
1634                         return -ENOMEM;
1635                 }
1636         }
1637         fc_conf = dpaa_intf->fc_conf;
1638         ret = fman_if_get_fc_threshold(fman_intf);
1639         if (ret) {
1640                 fc_conf->mode = RTE_FC_TX_PAUSE;
1641                 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
1642         } else {
1643                 fc_conf->mode = RTE_FC_NONE;
1644         }
1645
1646         return 0;
1647 }
1648
1649 /* Initialise an Rx FQ */
1650 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1651                               uint32_t fqid)
1652 {
1653         struct qm_mcc_initfq opts = {0};
1654         int ret;
1655         u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
1656         struct qm_mcc_initcgr cgr_opts = {
1657                 .we_mask = QM_CGR_WE_CS_THRES |
1658                                 QM_CGR_WE_CSTD_EN |
1659                                 QM_CGR_WE_MODE,
1660                 .cgr = {
1661                         .cstd_en = QM_CGR_EN,
1662                         .mode = QMAN_CGR_MODE_FRAME
1663                 }
1664         };
1665
1666         if (fmc_q || default_q) {
1667                 ret = qman_reserve_fqid(fqid);
1668                 if (ret) {
1669                         DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
1670                                      fqid, ret);
1671                         return -EINVAL;
1672                 }
1673         }
1674
1675         DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1676         ret = qman_create_fq(fqid, flags, fq);
1677         if (ret) {
1678                 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
1679                         fqid, ret);
1680                 return ret;
1681         }
1682         fq->is_static = false;
1683
1684         dpaa_poll_queue_default_config(&opts);
1685
1686         if (cgr_rx) {
1687                 /* Enable tail drop with cgr on this queue */
1688                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1689                 cgr_rx->cb = NULL;
1690                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1691                                       &cgr_opts);
1692                 if (ret) {
1693                         DPAA_PMD_WARN(
1694                                 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1695                                 fq->fqid, ret);
1696                         goto without_cgr;
1697                 }
1698                 opts.we_mask |= QM_INITFQ_WE_CGID;
1699                 opts.fqd.cgid = cgr_rx->cgrid;
1700                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1701         }
1702 without_cgr:
1703         ret = qman_init_fq(fq, 0, &opts);
1704         if (ret)
1705                 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
1706         return ret;
1707 }
1708
1709 /* Initialise a Tx FQ */
1710 static int dpaa_tx_queue_init(struct qman_fq *fq,
1711                               struct fman_if *fman_intf,
1712                               struct qman_cgr *cgr_tx)
1713 {
1714         struct qm_mcc_initfq opts = {0};
1715         struct qm_mcc_initcgr cgr_opts = {
1716                 .we_mask = QM_CGR_WE_CS_THRES |
1717                                 QM_CGR_WE_CSTD_EN |
1718                                 QM_CGR_WE_MODE,
1719                 .cgr = {
1720                         .cstd_en = QM_CGR_EN,
1721                         .mode = QMAN_CGR_MODE_FRAME
1722                 }
1723         };
1724         int ret;
1725
1726         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1727                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1728         if (ret) {
1729                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1730                 return ret;
1731         }
1732         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1733                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1734         opts.fqd.dest.channel = fman_intf->tx_channel_id;
1735         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1736         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1737         opts.fqd.context_b = 0;
1738         /* no tx-confirmation */
1739         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1740         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1741         DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1742
1743         if (cgr_tx) {
1744                 /* Enable tail drop with cgr on this queue */
1745                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
1746                                       td_tx_threshold, 0);
1747                 cgr_tx->cb = NULL;
1748                 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
1749                                       &cgr_opts);
1750                 if (ret) {
1751                         DPAA_PMD_WARN(
1752                                 "tx taildrop init fail on tx fqid 0x%x(ret=%d)",
1753                                 fq->fqid, ret);
1754                         goto without_cgr;
1755                 }
1756                 opts.we_mask |= QM_INITFQ_WE_CGID;
1757                 opts.fqd.cgid = cgr_tx->cgrid;
1758                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1759                 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
1760                                 td_tx_threshold);
1761         }
1762 without_cgr:
1763         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1764         if (ret)
1765                 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
1766         return ret;
1767 }
1768
1769 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1770 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1771 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1772 {
1773         struct qm_mcc_initfq opts = {0};
1774         int ret;
1775
1776         PMD_INIT_FUNC_TRACE();
1777
1778         ret = qman_reserve_fqid(fqid);
1779         if (ret) {
1780                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1781                         fqid, ret);
1782                 return -EINVAL;
1783         }
1784         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
1785         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1786         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1787         if (ret) {
1788                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1789                         fqid, ret);
1790                 return ret;
1791         }
1792         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1793         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1794         ret = qman_init_fq(fq, 0, &opts);
1795         if (ret)
1796                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1797                             fqid, ret);
1798         return ret;
1799 }
1800 #endif
1801
1802 /* Initialise a network interface in the secondary process */
1803 static int
1804 dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
1805 {
1806         struct rte_dpaa_device *dpaa_device;
1807         struct fm_eth_port_cfg *cfg;
1808         struct dpaa_if *dpaa_intf;
1809         struct fman_if *fman_intf;
1810         int dev_id;
1811
1812         PMD_INIT_FUNC_TRACE();
1813
1814         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1815         dev_id = dpaa_device->id.dev_id;
1816         cfg = dpaa_get_eth_port_cfg(dev_id);
1817         fman_intf = cfg->fman_if;
1818         eth_dev->process_private = fman_intf;
1819
1820         /* Plugging of UCODE burst API not supported in Secondary */
1821         dpaa_intf = eth_dev->data->dev_private;
1822         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1823         if (dpaa_intf->cgr_tx)
1824                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
1825         else
1826                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
1827 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1828         qman_set_fq_lookup_table(
1829                 dpaa_intf->rx_queues->qman_fq_lookup_table);
1830 #endif
1831
1832         return 0;
1833 }
1834
1835 /* Initialise a network interface */
1836 static int
1837 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1838 {
1839         int num_rx_fqs, fqid;
1840         int loop, ret = 0;
1841         int dev_id;
1842         struct rte_dpaa_device *dpaa_device;
1843         struct dpaa_if *dpaa_intf;
1844         struct fm_eth_port_cfg *cfg;
1845         struct fman_if *fman_intf;
1846         struct fman_if_bpool *bp, *tmp_bp;
1847         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1848         uint32_t cgrid_tx[MAX_DPAA_CORES];
1849         uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
1850         int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
1851         int8_t vsp_id = -1;
1852
1853         PMD_INIT_FUNC_TRACE();
1854
1855         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1856         dev_id = dpaa_device->id.dev_id;
1857         dpaa_intf = eth_dev->data->dev_private;
1858         cfg = dpaa_get_eth_port_cfg(dev_id);
1859         fman_intf = cfg->fman_if;
1860
1861         dpaa_intf->name = dpaa_device->name;
1862
1863         /* save fman_if & cfg in the interface structure */
1864         eth_dev->process_private = fman_intf;
1865         dpaa_intf->ifid = dev_id;
1866         dpaa_intf->cfg = cfg;
1867
1868         memset((char *)dev_rx_fqids, 0,
1869                 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
1870
1871         memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
1872
1873         /* Initialize Rx FQs */
1874         if (default_q) {
1875                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1876         } else if (fmc_q) {
1877                 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
1878                                                 dev_vspids,
1879                                                 DPAA_MAX_NUM_PCD_QUEUES);
1880                 if (num_rx_fqs < 0) {
1881                         DPAA_PMD_ERR("%s FMC initialization failed!",
1882                                 dpaa_intf->name);
                        ret = -EINVAL;
1883                         goto free_rx;
1884                 }
1885                 if (!num_rx_fqs) {
1886                         DPAA_PMD_WARN("%s is not configured by FMC.",
1887                                 dpaa_intf->name);
1888                 }
1889         } else {
1890                 /* FMCLESS mode: load balance across multiple cores. */
1891                 num_rx_fqs = rte_lcore_count();
1892         }
1893
1894         /* Each device cannot have more than DPAA_MAX_NUM_PCD_QUEUES RX
1895          * queues.
1896          */
1897         if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1898                 DPAA_PMD_ERR("Invalid number of RX queues\n");
1899                 return -EINVAL;
1900         }
1901
1902         if (num_rx_fqs > 0) {
1903                 dpaa_intf->rx_queues = rte_zmalloc(NULL,
1904                         sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1905                 if (!dpaa_intf->rx_queues) {
1906                         DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1907                         return -ENOMEM;
1908                 }
1909         } else {
1910                 dpaa_intf->rx_queues = NULL;
1911         }
1912
1913         memset(cgrid, 0, sizeof(cgrid));
1914         memset(cgrid_tx, 0, sizeof(cgrid_tx));
1915
1916         /* If the DPAA_TX_TAILDROP_THRESHOLD environment variable is set, use that
1917          * value; a value of 0 disables Tx tail drop.
1918          */
1919         if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
1920                 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
1921                 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
1922                                td_tx_threshold);
1923                 /* fall back to the default if an overly large value is configured */
1924                 if (td_tx_threshold > UINT16_MAX)
1925                         td_tx_threshold = CGR_RX_PERFQ_THRESH;
1926         }
1927
1928         /* If Rx congestion control is enabled globally */
1929         if (num_rx_fqs > 0 && td_threshold) {
1930                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1931                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1932                 if (!dpaa_intf->cgr_rx) {
1933                         DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1934                         ret = -ENOMEM;
1935                         goto free_rx;
1936                 }
1937
1938                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1939                 if (ret != num_rx_fqs) {
1940                         DPAA_PMD_WARN("insufficient CGRIDs available");
1941                         ret = -EINVAL;
1942                         goto free_rx;
1943                 }
1944         } else {
1945                 dpaa_intf->cgr_rx = NULL;
1946         }
1947
1948         if (!fmc_q && !default_q) {
1949                 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
1950                                             num_rx_fqs, 0);
1951                 if (ret < 0) {
1952                         DPAA_PMD_ERR("Failed to alloc rx fqids\n");
1953                         goto free_rx;
1954                 }
1955         }
1956
1957         for (loop = 0; loop < num_rx_fqs; loop++) {
1958                 if (default_q)
1959                         fqid = cfg->rx_def;
1960                 else
1961                         fqid = dev_rx_fqids[loop];
1962
1963                 vsp_id = dev_vspids[loop];
1964
1965                 if (dpaa_intf->cgr_rx)
1966                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1967
1968                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1969                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1970                         fqid);
1971                 if (ret)
1972                         goto free_rx;
1973                 dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
1974                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
1975         }
1976         dpaa_intf->nb_rx_queues = num_rx_fqs;
1977
1978         /* Initialise Tx FQs. Have as many Tx FQs as the number of cores. */
1979         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
1980                 MAX_DPAA_CORES, MAX_CACHELINE);
1981         if (!dpaa_intf->tx_queues) {
1982                 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
1983                 ret = -ENOMEM;
1984                 goto free_rx;
1985         }
1986
1987         /* If Tx congestion control is enabled globally */
1988         if (td_tx_threshold) {
1989                 dpaa_intf->cgr_tx = rte_zmalloc(NULL,
1990                         sizeof(struct qman_cgr) * MAX_DPAA_CORES,
1991                         MAX_CACHELINE);
1992                 if (!dpaa_intf->cgr_tx) {
1993                         DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
1994                         ret = -ENOMEM;
1995                         goto free_rx;
1996                 }
1997
1998                 ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
1999                                              1, 0);
2000                 if (ret != MAX_DPAA_CORES) {
2001                         DPAA_PMD_WARN("insufficient CGRIDs available");
2002                         ret = -EINVAL;
2003                         goto free_rx;
2004                 }
2005         } else {
2006                 dpaa_intf->cgr_tx = NULL;
2007         }
2008
2009
2010         for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
2011                 if (dpaa_intf->cgr_tx)
2012                         dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
2013
2014                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
2015                         fman_intf,
2016                         dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
2017                 if (ret)
2018                         goto free_tx;
2019                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
2020         }
2021         dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
2022
2023 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2024         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2025                         [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
2026         if (ret) {
2027                 DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
2028                 goto free_tx;
2029         }
2030         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
2031         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2032                         [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
2033         if (ret) {
2034                 DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
2035                 goto free_tx;
2036         }
2037         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
2038 #endif
2039
2040         DPAA_PMD_DEBUG("All frame queues created");
2041
2042         /* Get the initial configuration for flow control */
2043         dpaa_fc_set_default(dpaa_intf, fman_intf);
2044
2045         /* reset bpool list, initialize bpool dynamically */
2046         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
2047                 list_del(&bp->node);
2048                 rte_free(bp);
2049         }
2050
2051         /* Populate ethdev structure */
2052         eth_dev->dev_ops = &dpaa_devops;
2053         eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
2054         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
2055         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
2056
2057         /* Allocate memory for storing MAC addresses */
2058         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2059                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
2060         if (eth_dev->data->mac_addrs == NULL) {
2061                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
2062                                                 "store MAC addresses",
2063                                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
2064                 ret = -ENOMEM;
2065                 goto free_tx;
2066         }
2067
2068         /* copy the primary mac address */
2069         rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
2070
2071         RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
2072                 dpaa_device->name, RTE_ETHER_ADDR_BYTES(&fman_intf->mac_addr));
2073
2074         if (!fman_intf->is_shared_mac) {
2075                 /* Configure error packet handling */
2076                 fman_if_receive_rx_errors(fman_intf,
2077                         FM_FD_RX_STATUS_ERR_MASK);
2078                 /* Disable RX mode */
2079                 fman_if_disable_rx(fman_intf);
2080                 /* Disable promiscuous mode */
2081                 fman_if_promiscuous_disable(fman_intf);
2082                 /* Disable multicast */
2083                 fman_if_reset_mcast_filter_table(fman_intf);
2084                 /* Reset interface statistics */
2085                 fman_if_stats_reset(fman_intf);
2086                 /* Disable SG by default */
2087                 fman_if_set_sg(fman_intf, 0);
2088                 fman_if_set_maxfrm(fman_intf,
2089                                    RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2090         }
2091
2092         return 0;
2093
2094 free_tx:
2095         rte_free(dpaa_intf->tx_queues);
2096         dpaa_intf->tx_queues = NULL;
2097         dpaa_intf->nb_tx_queues = 0;
2098
2099 free_rx:
2100         rte_free(dpaa_intf->cgr_rx);
2101         rte_free(dpaa_intf->cgr_tx);
2102         rte_free(dpaa_intf->rx_queues);
2103         dpaa_intf->rx_queues = NULL;
2104         dpaa_intf->nb_rx_queues = 0;
2105         return ret;
2106 }
2107
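/* Bus probe handler: verify that the mbuf headroom can hold the DPAA
 * annotation area, perform one-time global setup (check for an FMC
 * configuration, adjust the push mode queue limit), initialise a QMan
 * portal for the calling thread, allocate the ethdev and run
 * dpaa_dev_init() or dpaa_dev_init_secondary().
 */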
2108 static int
2109 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
2110                struct rte_dpaa_device *dpaa_dev)
2111 {
2112         int diag;
2113         int ret;
2114         struct rte_eth_dev *eth_dev;
2115
2116         PMD_INIT_FUNC_TRACE();
2117
2118         if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
2119                 RTE_PKTMBUF_HEADROOM) {
2120                 DPAA_PMD_ERR(
2121                 "RTE_PKTMBUF_HEADROOM(%d) shall be >= DPAA Annotation req(%d)",
2122                 RTE_PKTMBUF_HEADROOM,
2123                 DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
2124
2125                 return -1;
2126         }
2127
2128         /* In case of secondary process, the device is already configured
2129          * and no further action is required, except portal initialization
2130          * and verifying secondary attachment to port name.
2131          */
2132         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2133                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
2134                 if (!eth_dev)
2135                         return -ENOMEM;
2136                 eth_dev->device = &dpaa_dev->device;
2137                 eth_dev->dev_ops = &dpaa_devops;
2138
2139                 ret = dpaa_dev_init_secondary(eth_dev);
2140                 if (ret != 0) {
2141                         RTE_LOG(ERR, PMD, "secondary dev init failed\n");
2142                         return ret;
2143                 }
2144
2145                 rte_eth_dev_probing_finish(eth_dev);
2146                 return 0;
2147         }
2148
2149         if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
2150                 if (access("/tmp/fmc.bin", F_OK) == -1) {
2151                         DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
2152                         default_q = 1;
2153                 }
2154
2155                 if (!(default_q || fmc_q)) {
2156                         if (dpaa_fm_init()) {
2157                                 DPAA_PMD_ERR("FM init failed\n");
2158                                 return -1;
2159                         }
2160                 }
2161
2162                 /* disabling the default push mode for LS1043 */
2163                 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
2164                         dpaa_push_mode_max_queue = 0;
2165
2166                 /* Check whether push mode queues are to be enabled. Currently we
2167                  * allow only one queue per thread.
2168                  */
2169                 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
2170                         dpaa_push_mode_max_queue =
2171                                         atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
2172                         if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
2173                                 dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
2174                 }
2175
2176                 is_global_init = 1;
2177         }
2178
2179         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2180                 ret = rte_dpaa_portal_init((void *)1);
2181                 if (ret) {
2182                         DPAA_PMD_ERR("Unable to initialize portal");
2183                         return ret;
2184                 }
2185         }
2186
2187         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
2188         if (!eth_dev)
2189                 return -ENOMEM;
2190
2191         eth_dev->data->dev_private =
2192                         rte_zmalloc("ethdev private structure",
2193                                         sizeof(struct dpaa_if),
2194                                         RTE_CACHE_LINE_SIZE);
2195         if (!eth_dev->data->dev_private) {
2196                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
2197                 rte_eth_dev_release_port(eth_dev);
2198                 return -ENOMEM;
2199         }
2200
2201         eth_dev->device = &dpaa_dev->device;
2202         dpaa_dev->eth_dev = eth_dev;
2203
2204         qman_ern_register_cb(dpaa_free_mbuf);
2205
2206         if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
2207                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2208
2209         /* Invoke PMD device initialization function */
2210         diag = dpaa_dev_init(eth_dev);
2211         if (diag == 0) {
2212                 rte_eth_dev_probing_finish(eth_dev);
2213                 return 0;
2214         }
2215
2216         rte_eth_dev_release_port(eth_dev);
2217         return diag;
2218 }
2219
2220 static int
2221 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
2222 {
2223         struct rte_eth_dev *eth_dev;
2224         int ret;
2225
2226         PMD_INIT_FUNC_TRACE();
2227
2228         eth_dev = dpaa_dev->eth_dev;
2229         dpaa_eth_dev_close(eth_dev);
2230         ret = rte_eth_dev_release_port(eth_dev);
2231
2232         return ret;
2233 }
2234
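/* Destructor: in the primary process and FMCLESS mode, deconfigure the FM
 * ports and VSPs of all DPAA ethdevs and terminate the FM library.
 */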
2235 static void __attribute__((destructor(102))) dpaa_finish(void)
2236 {
2237         /* For secondary, primary will do all the cleanup */
2238         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2239                 return;
2240
2241         if (!(default_q || fmc_q)) {
2242                 unsigned int i;
2243
2244                 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
2245                         if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
2246                                 struct rte_eth_dev *dev = &rte_eth_devices[i];
2247                                 struct dpaa_if *dpaa_intf =
2248                                         dev->data->dev_private;
2249                                 struct fman_if *fif =
2250                                         dev->process_private;
2251                                 if (dpaa_intf->port_handle)
2252                                         if (dpaa_fm_deconfig(dpaa_intf, fif))
2253                                                 DPAA_PMD_WARN("DPAA FM "
2254                                                         "deconfig failed\n");
2255                                 if (fif->num_profiles) {
2256                                         if (dpaa_port_vsp_cleanup(dpaa_intf,
2257                                                                   fif))
2258                                                 DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
2259                                 }
2260                         }
2261                 }
2262                 if (is_global_init)
2263                         if (dpaa_fm_term())
2264                                 DPAA_PMD_WARN("DPAA FM term failed\n");
2265
2266                 is_global_init = 0;
2267
2268                 DPAA_PMD_INFO("DPAA fman cleaned up");
2269         }
2270 }
2271
2272 static struct rte_dpaa_driver rte_dpaa_pmd = {
2273         .drv_flags = RTE_DPAA_DRV_INTR_LSC,
2274         .drv_type = FSL_DPAA_ETH,
2275         .probe = rte_dpaa_probe,
2276         .remove = rte_dpaa_remove,
2277 };
2278
2279 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
2280 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);