[dpdk.git] drivers/net/dpaa/dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2020 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_string_fns.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_interrupts.h>
22 #include <rte_log.h>
23 #include <rte_debug.h>
24 #include <rte_pci.h>
25 #include <rte_atomic.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_memory.h>
28 #include <rte_tailq.h>
29 #include <rte_eal.h>
30 #include <rte_alarm.h>
31 #include <rte_ether.h>
32 #include <ethdev_driver.h>
33 #include <rte_malloc.h>
34 #include <rte_ring.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38 #include <dpaa_mempool.h>
39
40 #include <dpaa_ethdev.h>
41 #include <dpaa_rxtx.h>
42 #include <dpaa_flow.h>
43 #include <rte_pmd_dpaa.h>
44
45 #include <fsl_usd.h>
46 #include <fsl_qman.h>
47 #include <fsl_bman.h>
48 #include <fsl_fman.h>
49 #include <process.h>
50 #include <fmlib/fm_ext.h>
51
52 #define CHECK_INTERVAL         100  /* 100ms */
53 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
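/*
 * Together these bound the link-status poll in dpaa_eth_link_update():
 * at most 90 iterations of 100 ms each, i.e. roughly 9 seconds of waiting
 * before the last observed status is reported.
 */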
54
55 /* Supported Rx offloads */
56 static uint64_t dev_rx_offloads_sup =
57                 DEV_RX_OFFLOAD_JUMBO_FRAME |
58                 DEV_RX_OFFLOAD_SCATTER;
59
60 /* Rx offloads which cannot be disabled */
61 static uint64_t dev_rx_offloads_nodis =
62                 DEV_RX_OFFLOAD_IPV4_CKSUM |
63                 DEV_RX_OFFLOAD_UDP_CKSUM |
64                 DEV_RX_OFFLOAD_TCP_CKSUM |
65                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
66                 DEV_RX_OFFLOAD_RSS_HASH;
67
68 /* Supported Tx offloads */
69 static uint64_t dev_tx_offloads_sup =
70                 DEV_TX_OFFLOAD_MT_LOCKFREE |
71                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
72
73 /* Tx offloads which cannot be disabled */
74 static uint64_t dev_tx_offloads_nodis =
75                 DEV_TX_OFFLOAD_IPV4_CKSUM |
76                 DEV_TX_OFFLOAD_UDP_CKSUM |
77                 DEV_TX_OFFLOAD_TCP_CKSUM |
78                 DEV_TX_OFFLOAD_SCTP_CKSUM |
79                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
80                 DEV_TX_OFFLOAD_MULTI_SEGS;
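/*
 * The *_nodis sets are offloads the hardware always performs and which
 * cannot be switched off per port. dpaa_eth_dev_configure() only logs an
 * informational message when the application did not explicitly request
 * them, and dpaa_eth_dev_info() always advertises them as supported.
 */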
81
82 /* Keep track of whether QMAN and BMAN have been globally initialized */
83 static int is_global_init;
84 static int fmc_q = 1;   /* Indicates the use of static fmc for distribution */
85 static int default_q;   /* use default queue - FMC is not executed */
86 /* At present we allow only up to 4 push-mode queues by default, as each
87  * such queue needs a dedicated portal and we are short of portals.
88  */
89 #define DPAA_MAX_PUSH_MODE_QUEUE       8
90 #define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
91
92 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
93 static int dpaa_push_queue_idx; /* Number of queues placed in push mode so far */
94
95
96 /* Per RX FQ Taildrop in frame count */
97 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
98
99 /* Per TX FQ Taildrop in frame count, disabled by default */
100 static unsigned int td_tx_threshold;
101
102 struct rte_dpaa_xstats_name_off {
103         char name[RTE_ETH_XSTATS_NAME_SIZE];
104         uint32_t offset;
105 };
106
107 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
108         {"rx_align_err",
109                 offsetof(struct dpaa_if_stats, raln)},
110         {"rx_valid_pause",
111                 offsetof(struct dpaa_if_stats, rxpf)},
112         {"rx_fcs_err",
113                 offsetof(struct dpaa_if_stats, rfcs)},
114         {"rx_vlan_frame",
115                 offsetof(struct dpaa_if_stats, rvlan)},
116         {"rx_frame_err",
117                 offsetof(struct dpaa_if_stats, rerr)},
118         {"rx_drop_err",
119                 offsetof(struct dpaa_if_stats, rdrp)},
120         {"rx_undersized",
121                 offsetof(struct dpaa_if_stats, rund)},
122         {"rx_oversize_err",
123                 offsetof(struct dpaa_if_stats, rovr)},
124         {"rx_fragment_pkt",
125                 offsetof(struct dpaa_if_stats, rfrg)},
126         {"tx_valid_pause",
127                 offsetof(struct dpaa_if_stats, txpf)},
128         {"tx_fcs_err",
129                 offsetof(struct dpaa_if_stats, terr)},
130         {"tx_vlan_frame",
131                 offsetof(struct dpaa_if_stats, tvlan)},
132         {"rx_undersized",
133                 offsetof(struct dpaa_if_stats, tund)},
134 };
135
136 static struct rte_dpaa_driver rte_dpaa_pmd;
137
138 static int
139 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
140
141 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
142                                 int wait_to_complete __rte_unused);
143
144 static void dpaa_interrupt_handler(void *param);
145
146 static inline void
147 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
148 {
149         memset(opts, 0, sizeof(struct qm_mcc_initfq));
150         opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
151         opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
152                            QM_FQCTRL_PREFERINCACHE;
153         opts->fqd.context_a.stashing.exclusive = 0;
154         if (dpaa_svr_family != SVR_LS1046A_FAMILY)
155                 opts->fqd.context_a.stashing.annotation_cl =
156                                                 DPAA_IF_RX_ANNOTATION_STASH;
157         opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
158         opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
159 }
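/*
 * Annotation stashing is deliberately skipped on LS1046A-family parts;
 * see the matching note in dpaa_eth_rx_queue_setup(), where stashing is
 * described as a bottleneck on that SoC in multicore scenarios.
 */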
160
161 static int
162 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
163 {
164         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
165                                 + VLAN_TAG_SIZE;
166         uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
167
168         PMD_INIT_FUNC_TRACE();
169
170         if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
171                 return -EINVAL;
172         /*
173          * Refuse an MTU that requires scattered-packet support when that
174          * feature has not been enabled beforehand.
175          */
176         if (dev->data->min_rx_buf_size &&
177                 !dev->data->scattered_rx && frame_size > buffsz) {
178                 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
179                 return -EINVAL;
180         }
181
182         /* check <seg size> * <max_seg>  >= max_frame */
183         if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
184                 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
185                 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
186                                 buffsz * DPAA_SGT_MAX_ENTRIES);
187                 return -EINVAL;
188         }
189
190         if (frame_size > DPAA_ETH_MAX_LEN)
191                 dev->data->dev_conf.rxmode.offloads |=
192                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
193         else
194                 dev->data->dev_conf.rxmode.offloads &=
195                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
196
197         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
198
199         fman_if_set_maxfrm(dev->process_private, frame_size);
200
201         return 0;
202 }
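/*
 * Worked example: RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) +
 * VLAN_TAG_SIZE (4) add 22 bytes of overhead, so an MTU of 9000 needs a
 * 9022-byte frame. Without scatter that frame must fit in one buffer;
 * with scatter it may span up to DPAA_SGT_MAX_ENTRIES buffers.
 */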
203
204 static int
205 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
206 {
207         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
208         uint64_t rx_offloads = eth_conf->rxmode.offloads;
209         uint64_t tx_offloads = eth_conf->txmode.offloads;
210         struct rte_device *rdev = dev->device;
211         struct rte_eth_link *link = &dev->data->dev_link;
212         struct rte_dpaa_device *dpaa_dev;
213         struct fman_if *fif = dev->process_private;
214         struct __fman_if *__fif;
215         struct rte_intr_handle *intr_handle;
216         int speed, duplex;
217         int ret;
218
219         PMD_INIT_FUNC_TRACE();
220
221         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
222         intr_handle = &dpaa_dev->intr_handle;
223         __fif = container_of(fif, struct __fman_if, __if);
224
225         /* Rx offloads which are enabled by default */
226         if (dev_rx_offloads_nodis & ~rx_offloads) {
227                 DPAA_PMD_INFO(
228                 "Some Rx offloads are enabled by default - requested 0x%" PRIx64
229                 " fixed are 0x%" PRIx64,
230                 rx_offloads, dev_rx_offloads_nodis);
231         }
232
233         /* Tx offloads which are enabled by default */
234         if (dev_tx_offloads_nodis & ~tx_offloads) {
235                 DPAA_PMD_INFO(
236                 "Some Tx offloads are enabled by default - requested 0x%" PRIx64
237                 " fixed are 0x%" PRIx64,
238                 tx_offloads, dev_tx_offloads_nodis);
239         }
240
241         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
242                 uint32_t max_len;
243
244                 DPAA_PMD_DEBUG("enabling jumbo");
245
246                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
247                     DPAA_MAX_RX_PKT_LEN)
248                         max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
249                 else {
250                         DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
251                                 "supported is %d",
252                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
253                                 DPAA_MAX_RX_PKT_LEN);
254                         max_len = DPAA_MAX_RX_PKT_LEN;
255                 }
256
257                 fman_if_set_maxfrm(dev->process_private, max_len);
258                 dev->data->mtu = max_len
259                         - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
260         }
261
262         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
263                 DPAA_PMD_DEBUG("enabling scatter mode");
264                 fman_if_set_sg(dev->process_private, 1);
265                 dev->data->scattered_rx = 1;
266         }
267
268         if (!(default_q || fmc_q)) {
269                 if (dpaa_fm_config(dev,
270                         eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
271                         dpaa_write_fm_config_to_file();
272                         DPAA_PMD_ERR("FM port configuration: Failed\n");
273                         return -1;
274                 }
275                 dpaa_write_fm_config_to_file();
276         }
277
278         /* If interrupts were configured on this device */
279         if (intr_handle && intr_handle->fd) {
280                 if (dev->data->dev_conf.intr_conf.lsc != 0)
281                         rte_intr_callback_register(intr_handle,
282                                            dpaa_interrupt_handler,
283                                            (void *)dev);
284
285                 ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
286                 if (ret) {
287                         if (dev->data->dev_conf.intr_conf.lsc != 0) {
288                                 rte_intr_callback_unregister(intr_handle,
289                                         dpaa_interrupt_handler,
290                                         (void *)dev);
291                                 if (ret == EINVAL)
292                                         printf("Failed to enable interrupt: Not Supported\n");
293                                 else
294                                         printf("Failed to enable interrupt\n");
295                         }
296                         dev->data->dev_conf.intr_conf.lsc = 0;
297                         dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
298                 }
299         }
300
301         /* Wait for link status to get updated */
302         if (!link->link_status)
303                 sleep(1);
304
305         /* Configure link only if link is UP */
306         if (link->link_status) {
307                 if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
308                         /* Start autoneg only if link is not in autoneg mode */
309                         if (!link->link_autoneg)
310                                 dpaa_restart_link_autoneg(__fif->node_name);
311                 } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
312                         switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
313                         case ETH_LINK_SPEED_10M_HD:
314                                 speed = ETH_SPEED_NUM_10M;
315                                 duplex = ETH_LINK_HALF_DUPLEX;
316                                 break;
317                         case ETH_LINK_SPEED_10M:
318                                 speed = ETH_SPEED_NUM_10M;
319                                 duplex = ETH_LINK_FULL_DUPLEX;
320                                 break;
321                         case ETH_LINK_SPEED_100M_HD:
322                                 speed = ETH_SPEED_NUM_100M;
323                                 duplex = ETH_LINK_HALF_DUPLEX;
324                                 break;
325                         case ETH_LINK_SPEED_100M:
326                                 speed = ETH_SPEED_NUM_100M;
327                                 duplex = ETH_LINK_FULL_DUPLEX;
328                                 break;
329                         case ETH_LINK_SPEED_1G:
330                                 speed = ETH_SPEED_NUM_1G;
331                                 duplex = ETH_LINK_FULL_DUPLEX;
332                                 break;
333                         case ETH_LINK_SPEED_2_5G:
334                                 speed = ETH_SPEED_NUM_2_5G;
335                                 duplex = ETH_LINK_FULL_DUPLEX;
336                                 break;
337                         case ETH_LINK_SPEED_10G:
338                                 speed = ETH_SPEED_NUM_10G;
339                                 duplex = ETH_LINK_FULL_DUPLEX;
340                                 break;
341                         default:
342                                 speed = ETH_SPEED_NUM_NONE;
343                                 duplex = ETH_LINK_FULL_DUPLEX;
344                                 break;
345                         }
346                         /* Set link speed */
347                         dpaa_update_link_speed(__fif->node_name, speed, duplex);
348                 } else {
349                         /* Manual autoneg - custom advertisement speed. */
350                         printf("Custom Advertisement speeds not supported\n");
351                 }
352         }
353
354         return 0;
355 }
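/*
 * Usage sketch (application side, illustrative; assumes a valid port_id):
 * requesting a fixed 1G full-duplex link through the standard ethdev API,
 * which the code above maps onto dpaa_update_link_speed().
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_1G;
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 */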
356
357 static const uint32_t *
358 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
359 {
360         static const uint32_t ptypes[] = {
361                 RTE_PTYPE_L2_ETHER,
362                 RTE_PTYPE_L2_ETHER_VLAN,
363                 RTE_PTYPE_L2_ETHER_ARP,
364                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
365                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
366                 RTE_PTYPE_L4_ICMP,
367                 RTE_PTYPE_L4_TCP,
368                 RTE_PTYPE_L4_UDP,
369                 RTE_PTYPE_L4_FRAG,
372                 RTE_PTYPE_L4_SCTP
373         };
374
375         PMD_INIT_FUNC_TRACE();
376
377         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
378                 return ptypes;
379         return NULL;
380 }
381
382 static void dpaa_interrupt_handler(void *param)
383 {
384         struct rte_eth_dev *dev = param;
385         struct rte_device *rdev = dev->device;
386         struct rte_dpaa_device *dpaa_dev;
387         struct rte_intr_handle *intr_handle;
388         uint64_t buf;
389         int bytes_read;
390
391         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
392         intr_handle = &dpaa_dev->intr_handle;
393
394         bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
395         if (bytes_read < 0)
396                 DPAA_PMD_ERR("Error reading eventfd\n");
397         dpaa_eth_link_update(dev, 0);
398         rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
399 }
400
401 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
402 {
403         struct dpaa_if *dpaa_intf = dev->data->dev_private;
404
405         PMD_INIT_FUNC_TRACE();
406
407         if (!(default_q || fmc_q))
408                 dpaa_write_fm_config_to_file();
409
410         /* Change tx callback to the real one */
411         if (dpaa_intf->cgr_tx)
412                 dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
413         else
414                 dev->tx_pkt_burst = dpaa_eth_queue_tx;
415
416         fman_if_enable_rx(dev->process_private);
417
418         return 0;
419 }
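/*
 * Note: the Tx burst callback is chosen here rather than at probe time.
 * When Tx congestion groups are in use (cgr_tx set), the slower path
 * dpaa_eth_queue_tx_slow() handles congestion state; otherwise the
 * regular dpaa_eth_queue_tx() is installed.
 */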
420
421 static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
422 {
423         struct fman_if *fif = dev->process_private;
424
425         PMD_INIT_FUNC_TRACE();
426         dev->data->dev_started = 0;
427
428         if (!fif->is_shared_mac)
429                 fman_if_disable_rx(fif);
430         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
431
432         return 0;
433 }
434
435 static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
436 {
437         struct fman_if *fif = dev->process_private;
438         struct __fman_if *__fif;
439         struct rte_device *rdev = dev->device;
440         struct rte_dpaa_device *dpaa_dev;
441         struct rte_intr_handle *intr_handle;
442         struct rte_eth_link *link = &dev->data->dev_link;
443         struct dpaa_if *dpaa_intf = dev->data->dev_private;
444         int loop;
445         int ret;
446
447         PMD_INIT_FUNC_TRACE();
448
449         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
450                 return 0;
451
452         if (!dpaa_intf) {
453                 DPAA_PMD_WARN("Already closed or not started");
454                 return -1;
455         }
456
457         /* DPAA FM deconfig */
458         if (!(default_q || fmc_q)) {
459                 if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
460                         DPAA_PMD_WARN("DPAA FM deconfig failed\n");
461         }
462
463         dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
464         intr_handle = &dpaa_dev->intr_handle;
465         __fif = container_of(fif, struct __fman_if, __if);
466
467         ret = dpaa_eth_dev_stop(dev);
468
469         /* Reset link to autoneg */
470         if (link->link_status && !link->link_autoneg)
471                 dpaa_restart_link_autoneg(__fif->node_name);
472
473         if (intr_handle && intr_handle->fd &&
474             dev->data->dev_conf.intr_conf.lsc != 0) {
475                 dpaa_intr_disable(__fif->node_name);
476                 rte_intr_callback_unregister(intr_handle,
477                                              dpaa_interrupt_handler,
478                                              (void *)dev);
479         }
480
481         /* release configuration memory */
482         if (dpaa_intf->fc_conf)
483                 rte_free(dpaa_intf->fc_conf);
484
485         /* Release RX congestion Groups */
486         if (dpaa_intf->cgr_rx) {
487                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
488                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
489         }
490
491         rte_free(dpaa_intf->cgr_rx);
492         dpaa_intf->cgr_rx = NULL;
493         /* Release TX congestion Groups */
494         if (dpaa_intf->cgr_tx) {
495                 for (loop = 0; loop < MAX_DPAA_CORES; loop++)
496                         qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
497                 rte_free(dpaa_intf->cgr_tx);
498                 dpaa_intf->cgr_tx = NULL;
499         }
500
501         rte_free(dpaa_intf->rx_queues);
502         dpaa_intf->rx_queues = NULL;
503
504         rte_free(dpaa_intf->tx_queues);
505         dpaa_intf->tx_queues = NULL;
506
507         return ret;
508 }
509
510 static int
511 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
512                      char *fw_version,
513                      size_t fw_size)
514 {
515         int ret;
516         FILE *svr_file = NULL;
517         unsigned int svr_ver = 0;
518
519         PMD_INIT_FUNC_TRACE();
520
521         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
522         if (!svr_file) {
523                 DPAA_PMD_ERR("Unable to open SoC device");
524                 return -ENOTSUP; /* Not supported on this infra */
525         }
526         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
527                 dpaa_svr_family = svr_ver & SVR_MASK;
528         else
529                 DPAA_PMD_ERR("Unable to read SoC device");
530
531         fclose(svr_file);
532
533         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
534                        svr_ver, fman_ip_rev);
535         if (ret < 0)
536                 return -EINVAL;
537
538         ret += 1; /* add the size of '\0' */
539         if (fw_size < (size_t)ret)
540                 return ret;
541         else
542                 return 0;
543 }
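/*
 * The return convention matches the ethdev fw_version_get contract:
 * 0 when the version string fit into fw_size, otherwise the number of
 * bytes (including the terminating NUL) the caller should retry with.
 */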
544
545 static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
546                              struct rte_eth_dev_info *dev_info)
547 {
548         struct dpaa_if *dpaa_intf = dev->data->dev_private;
549         struct fman_if *fif = dev->process_private;
550
551         DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
552
553         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
554         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
555         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
556         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
557         dev_info->max_hash_mac_addrs = 0;
558         dev_info->max_vfs = 0;
559         dev_info->max_vmdq_pools = ETH_16_POOLS;
560         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
561
562         if (fif->mac_type == fman_mac_1g) {
563                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
564                                         | ETH_LINK_SPEED_10M
565                                         | ETH_LINK_SPEED_100M_HD
566                                         | ETH_LINK_SPEED_100M
567                                         | ETH_LINK_SPEED_1G;
568         } else if (fif->mac_type == fman_mac_2_5g) {
569                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
570                                         | ETH_LINK_SPEED_10M
571                                         | ETH_LINK_SPEED_100M_HD
572                                         | ETH_LINK_SPEED_100M
573                                         | ETH_LINK_SPEED_1G
574                                         | ETH_LINK_SPEED_2_5G;
575         } else if (fif->mac_type == fman_mac_10g) {
576                 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
577                                         | ETH_LINK_SPEED_10M
578                                         | ETH_LINK_SPEED_100M_HD
579                                         | ETH_LINK_SPEED_100M
580                                         | ETH_LINK_SPEED_1G
581                                         | ETH_LINK_SPEED_2_5G
582                                         | ETH_LINK_SPEED_10G;
583         } else {
584                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
585                              dpaa_intf->name, fif->mac_type);
586                 return -EINVAL;
587         }
588
589         dev_info->rx_offload_capa = dev_rx_offloads_sup |
590                                         dev_rx_offloads_nodis;
591         dev_info->tx_offload_capa = dev_tx_offloads_sup |
592                                         dev_tx_offloads_nodis;
593         dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
594         dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
595         dev_info->default_rxportconf.nb_queues = 1;
596         dev_info->default_txportconf.nb_queues = 1;
597         dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
598         dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
599
600         return 0;
601 }
602
603 static int
604 dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
605                         __rte_unused uint16_t queue_id,
606                         struct rte_eth_burst_mode *mode)
607 {
608         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
609         int ret = -EINVAL;
610         unsigned int i;
611         const struct burst_info {
612                 uint64_t flags;
613                 const char *output;
614         } rx_offload_map[] = {
615                         {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
616                         {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
617                         {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
618                         {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
619                         {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
620                         {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
621                         {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
622         };
623
624         /* Update Rx offload info */
625         for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
626                 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
627                         snprintf(mode->info, sizeof(mode->info), "%s",
628                                 rx_offload_map[i].output);
629                         ret = 0;
630                         break;
631                 }
632         }
633         return ret;
634 }
635
636 static int
637 dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
638                         __rte_unused uint16_t queue_id,
639                         struct rte_eth_burst_mode *mode)
640 {
641         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
642         int ret = -EINVAL;
643         unsigned int i;
644         const struct burst_info {
645                 uint64_t flags;
646                 const char *output;
647         } tx_offload_map[] = {
648                         {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
649                         {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
650                         {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
651                         {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
652                         {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
653                         {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
654                         {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
655                         {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
656         };
657
658         /* Update Tx offload info */
659         for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
660                 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
661                         snprintf(mode->info, sizeof(mode->info), "%s",
662                                 tx_offload_map[i].output);
663                         ret = 0;
664                         break;
665                 }
666         }
667         return ret;
668 }
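/*
 * Both burst-mode helpers above report only the first matching offload
 * flag: the loop breaks after one snprintf() into mode->info, so the
 * string names a single representative offload rather than the full set.
 */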
669
670 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
671                                 int wait_to_complete)
672 {
673         struct dpaa_if *dpaa_intf = dev->data->dev_private;
674         struct rte_eth_link *link = &dev->data->dev_link;
675         struct fman_if *fif = dev->process_private;
676         struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
677         int ret, ioctl_version;
678         uint8_t count;
679
680         PMD_INIT_FUNC_TRACE();
681
682         ioctl_version = dpaa_get_ioctl_version_number();
683
684         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
685                 for (count = 0; count <= MAX_REPEAT_TIME; count++) {
686                         ret = dpaa_get_link_status(__fif->node_name, link);
687                         if (ret)
688                                 return ret;
689                         if (link->link_status == ETH_LINK_DOWN &&
690                             wait_to_complete)
691                                 rte_delay_ms(CHECK_INTERVAL);
692                         else
693                                 break;
694                 }
695         } else {
696                 link->link_status = dpaa_intf->valid;
697         }
698
699         if (ioctl_version < 2) {
700                 link->link_duplex = ETH_LINK_FULL_DUPLEX;
701                 link->link_autoneg = ETH_LINK_AUTONEG;
702
703                 if (fif->mac_type == fman_mac_1g)
704                         link->link_speed = ETH_SPEED_NUM_1G;
705                 else if (fif->mac_type == fman_mac_2_5g)
706                         link->link_speed = ETH_SPEED_NUM_2_5G;
707                 else if (fif->mac_type == fman_mac_10g)
708                         link->link_speed = ETH_SPEED_NUM_10G;
709                 else
710                         DPAA_PMD_ERR("invalid link_speed: %s, %d",
711                                      dpaa_intf->name, fif->mac_type);
712         }
713
714         DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
715                       link->link_status ? "Up" : "Down");
716         return 0;
717 }
718
719 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
720                                struct rte_eth_stats *stats)
721 {
722         PMD_INIT_FUNC_TRACE();
723
724         fman_if_stats_get(dev->process_private, stats);
725         return 0;
726 }
727
728 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
729 {
730         PMD_INIT_FUNC_TRACE();
731
732         fman_if_stats_reset(dev->process_private);
733
734         return 0;
735 }
736
737 static int
738 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
739                     unsigned int n)
740 {
741         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
742         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
743
744         if (n < num)
745                 return num;
746
747         if (xstats == NULL)
748                 return 0;
749
750         fman_if_stats_get_all(dev->process_private, values,
751                               sizeof(struct dpaa_if_stats) / 8);
752
753         for (i = 0; i < num; i++) {
754                 xstats[i].id = i;
755                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
756         }
757         return i;
758 }
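/*
 * struct dpaa_if_stats is laid out as consecutive 64-bit hardware
 * counters, so fman_if_stats_get_all() fills one u64 slot per counter
 * and a byte offset from dpaa_xstats_strings[] divided by 8 indexes
 * straight into the snapshot array.
 */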
759
760 static int
761 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
762                       struct rte_eth_xstat_name *xstats_names,
763                       unsigned int limit)
764 {
765         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
766
767         if (limit < stat_cnt)
768                 return stat_cnt;
769
770         if (xstats_names != NULL)
771                 for (i = 0; i < stat_cnt; i++)
772                         strlcpy(xstats_names[i].name,
773                                 dpaa_xstats_strings[i].name,
774                                 sizeof(xstats_names[i].name));
775
776         return stat_cnt;
777 }
778
779 static int
780 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
781                       uint64_t *values, unsigned int n)
782 {
783         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
784         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
785
786         if (!ids) {
787                 if (n < stat_cnt)
788                         return stat_cnt;
789
790                 if (!values)
791                         return 0;
792
793                 fman_if_stats_get_all(dev->process_private, values_copy,
794                                       sizeof(struct dpaa_if_stats) / 8);
795
796                 for (i = 0; i < stat_cnt; i++)
797                         values[i] =
798                                 values_copy[dpaa_xstats_strings[i].offset / 8];
799
800                 return stat_cnt;
801         }
802
803         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
804
805         for (i = 0; i < n; i++) {
806                 if (ids[i] >= stat_cnt) {
807                         DPAA_PMD_ERR("id value isn't valid");
808                         return -1;
809                 }
810                 values[i] = values_copy[ids[i]];
811         }
812         return n;
813 }
814
815 static int
816 dpaa_xstats_get_names_by_id(
817         struct rte_eth_dev *dev,
818         struct rte_eth_xstat_name *xstats_names,
819         const uint64_t *ids,
820         unsigned int limit)
821 {
822         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
823         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
824
825         if (!ids)
826                 return dpaa_xstats_get_names(dev, xstats_names, limit);
827
828         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
829
830         for (i = 0; i < limit; i++) {
831                 if (ids[i] >= stat_cnt) {
832                         DPAA_PMD_ERR("id value isn't valid");
833                         return -1;
834                 }
835                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
836         }
837         return limit;
838 }
839
840 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
841 {
842         PMD_INIT_FUNC_TRACE();
843
844         fman_if_promiscuous_enable(dev->process_private);
845
846         return 0;
847 }
848
849 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
850 {
851         PMD_INIT_FUNC_TRACE();
852
853         fman_if_promiscuous_disable(dev->process_private);
854
855         return 0;
856 }
857
858 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
859 {
860         PMD_INIT_FUNC_TRACE();
861
862         fman_if_set_mcast_filter_table(dev->process_private);
863
864         return 0;
865 }
866
867 static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
868 {
869         PMD_INIT_FUNC_TRACE();
870
871         fman_if_reset_mcast_filter_table(dev->process_private);
872
873         return 0;
874 }
875
876 static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
877 {
878         struct dpaa_if *dpaa_intf = dev->data->dev_private;
879         struct fman_if_ic_params icp;
880         uint32_t fd_offset;
881         uint32_t bp_size;
882
883         memset(&icp, 0, sizeof(icp));
884         /* Set ICIOF, ICEOF and ICSZ to their default values */
885         icp.iciof = DEFAULT_ICIOF;
886         icp.iceof = DEFAULT_RX_ICEOF;
887         icp.icsz = DEFAULT_ICSZ;
888         fman_if_set_ic_params(dev->process_private, &icp);
889
890         fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
891         fman_if_set_fdoff(dev->process_private, fd_offset);
892
893         /* Buffer pool size should be equal to the dataroom size */
894         bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
895
896         fman_if_set_bp(dev->process_private,
897                        dpaa_intf->bp_info->mp->size,
898                        dpaa_intf->bp_info->bpid, bp_size);
899 }
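/*
 * The frame descriptor offset leaves RTE_PKTMBUF_HEADROOM plus the
 * hardware reserve in front of the received data, so a frame can be
 * wrapped into an mbuf without copying. The buffer pool is then bound
 * to the interface with the mempool's dataroom as the buffer size.
 */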
900
901 static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
902                                              int8_t vsp_id, uint32_t bpid)
903 {
904         struct dpaa_if *dpaa_intf = dev->data->dev_private;
905         struct fman_if *fif = dev->process_private;
906
907         if (fif->num_profiles) {
908                 if (vsp_id < 0)
909                         vsp_id = fif->base_profile_id;
910         } else {
911                 if (vsp_id < 0)
912                         vsp_id = 0;
913         }
914
915         if (dpaa_intf->vsp_bpid[vsp_id] &&
916                 bpid != dpaa_intf->vsp_bpid[vsp_id]) {
917                 DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
918
919                 return -1;
920         }
921
922         return 0;
923 }
924
925 static
926 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
927                             uint16_t nb_desc,
928                             unsigned int socket_id __rte_unused,
929                             const struct rte_eth_rxconf *rx_conf,
930                             struct rte_mempool *mp)
931 {
932         struct dpaa_if *dpaa_intf = dev->data->dev_private;
933         struct fman_if *fif = dev->process_private;
934         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
935         struct qm_mcc_initfq opts = {0};
936         u32 flags = 0;
937         int ret;
938         u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
939
940         PMD_INIT_FUNC_TRACE();
941
942         if (queue_idx >= dev->data->nb_rx_queues) {
943                 rte_errno = EOVERFLOW;
944                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
945                       (void *)dev, queue_idx, dev->data->nb_rx_queues);
946                 return -rte_errno;
947         }
948
949         /* Rx deferred start is not supported */
950         if (rx_conf->rx_deferred_start) {
951                 DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
952                 return -EINVAL;
953         }
954         rxq->nb_desc = UINT16_MAX;
955         rxq->offloads = rx_conf->offloads;
956
957         DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
958                         queue_idx, rxq->fqid);
959
960         if (!fif->num_profiles) {
961                 if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
962                         dpaa_intf->bp_info->mp != mp) {
963                         DPAA_PMD_WARN("Multiple pools on same interface not"
964                                       " supported");
965                         return -EINVAL;
966                 }
967         } else {
968                 if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
969                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
970                         return -EINVAL;
971                 }
972         }
973
974         if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
975             dpaa_intf->bp_info->mp != mp) {
976                 DPAA_PMD_WARN("Multiple pools on same interface not supported");
977                 return -EINVAL;
978         }
979
980         /* Max packet can fit in single buffer */
981         if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
982                 ;
983         } else if (dev->data->dev_conf.rxmode.offloads &
984                         DEV_RX_OFFLOAD_SCATTER) {
985                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
986                         buffsz * DPAA_SGT_MAX_ENTRIES) {
987                         DPAA_PMD_ERR("max RxPkt size %d too big to fit "
988                                 "MaxSGlist %d",
989                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
990                                 buffsz * DPAA_SGT_MAX_ENTRIES);
991                         rte_errno = EOVERFLOW;
992                         return -rte_errno;
993                 }
994         } else {
995                 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
996                      " larger than a single mbuf (%u) and scattered"
997                      " mode has not been requested",
998                      dev->data->dev_conf.rxmode.max_rx_pkt_len,
999                      buffsz);
1000         }
1001
1002         dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
1003
1004         /* For a shared interface this is done in the kernel; skip. */
1005         if (!fif->is_shared_mac)
1006                 dpaa_fman_if_pool_setup(dev);
1007
1008         if (fif->num_profiles) {
1009                 int8_t vsp_id = rxq->vsp_id;
1010
1011                 if (vsp_id >= 0) {
1012                         ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
1013                                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
1014                                         fif);
1015                         if (ret) {
1016                                 DPAA_PMD_ERR("dpaa_port_vsp_update failed");
1017                                 return ret;
1018                         }
1019                 } else {
1020                         DPAA_PMD_INFO("Base profile is associated to"
1021                                 " RXQ fqid:%d\r\n", rxq->fqid);
1022                         if (fif->is_shared_mac) {
1023                                 DPAA_PMD_ERR("Fatal: Base profile is associated"
1024                                              " to shared interface on DPDK.");
1025                                 return -EINVAL;
1026                         }
1027                         dpaa_intf->vsp_bpid[fif->base_profile_id] =
1028                                 DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1029                 }
1030         } else {
1031                 dpaa_intf->vsp_bpid[0] =
1032                         DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
1033         }
1034
1035         dpaa_intf->valid = 1;
1036         DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
1037                 fman_if_get_sg_enable(fif),
1038                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1039         /* Check whether this queue can use push mode; no error check for now */
1040         if (!rxq->is_static &&
1041             dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
1042                 struct qman_portal *qp;
1043                 int q_fd;
1044
1045                 dpaa_push_queue_idx++;
1046                 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
1047                 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
1048                                    QM_FQCTRL_CTXASTASHING |
1049                                    QM_FQCTRL_PREFERINCACHE;
1050                 opts.fqd.context_a.stashing.exclusive = 0;
1051                 /* In a multicore scenario stashing becomes a bottleneck on
1052                  * LS1046, so do not enable stashing in this case.
1053                  */
1054                 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
1055                         opts.fqd.context_a.stashing.annotation_cl =
1056                                                 DPAA_IF_RX_ANNOTATION_STASH;
1057                 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
1058                 opts.fqd.context_a.stashing.context_cl =
1059                                                 DPAA_IF_RX_CONTEXT_STASH;
1060
1061                 /* Create a channel and associate the given queue with it */
1062                 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
1063                 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1064                 opts.fqd.dest.channel = rxq->ch_id;
1065                 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
1066                 flags = QMAN_INITFQ_FLAG_SCHED;
1067
1068                 /* Configure tail drop */
1069                 if (dpaa_intf->cgr_rx) {
1070                         opts.we_mask |= QM_INITFQ_WE_CGID;
1071                         opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
1072                         opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1073                 }
1074                 ret = qman_init_fq(rxq, flags, &opts);
1075                 if (ret) {
1076                         DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
1077                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1078                         return ret;
1079                 }
1080                 if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
1081                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
1082                 } else {
1083                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
1084                         rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
1085                 }
1086
1087                 rxq->is_static = true;
1088
1089                 /* Allocate qman specific portals */
1090                 qp = fsl_qman_fq_portal_create(&q_fd);
1091                 if (!qp) {
1092                         DPAA_PMD_ERR("Unable to alloc fq portal");
1093                         return -1;
1094                 }
1095                 rxq->qp = qp;
1096
1097                 /* Set up the device interrupt handler */
1098                 if (!dev->intr_handle) {
1099                         struct rte_dpaa_device *dpaa_dev;
1100                         struct rte_device *rdev = dev->device;
1101
1102                         dpaa_dev = container_of(rdev, struct rte_dpaa_device,
1103                                                 device);
1104                         dev->intr_handle = &dpaa_dev->intr_handle;
1105                         dev->intr_handle->intr_vec = rte_zmalloc(NULL,
1106                                         dpaa_push_mode_max_queue, 0);
1107                         if (!dev->intr_handle->intr_vec) {
1108                                 DPAA_PMD_ERR("intr_vec alloc failed");
1109                                 return -ENOMEM;
1110                         }
1111                         dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
1112                         dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
1113                 }
1114
1115                 dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
1116                 dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
1117                 dev->intr_handle->efds[queue_idx] = q_fd;
1118                 rxq->q_fd = q_fd;
1119         }
1120         rxq->bp_array = rte_dpaa_bpid_info;
1121         dev->data->rx_queues[queue_idx] = rxq;
1122
1123         /* configure the CGR size as per the desc size */
1124         if (dpaa_intf->cgr_rx) {
1125                 struct qm_mcc_initcgr cgr_opts = {0};
1126
1127                 rxq->nb_desc = nb_desc;
1128                 /* Enable tail drop with cgr on this queue */
1129                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
1130                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
1131                 if (ret) {
1132                         DPAA_PMD_WARN(
1133                                 "rx taildrop modify fail on fqid %d (ret=%d)",
1134                                 rxq->fqid, ret);
1135                 }
1136         }
1137         /* Enable main queue to receive error packets also by default */
1138         fman_if_set_err_fqid(fif, rxq->fqid);
1139         return 0;
1140 }
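/*
 * Usage sketch (application side, illustrative; assumes port_id and an
 * initialized mbuf pool mb_pool): one Rx queue with 512 descriptors,
 * which lands in the function above and, with CGR enabled, becomes the
 * tail-drop threshold via qm_cgr_cs_thres_set64().
 *
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                NULL, mb_pool);
 */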
1141
1142 int
1143 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
1144                 int eth_rx_queue_id,
1145                 u16 ch_id,
1146                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1147 {
1148         int ret;
1149         u32 flags = 0;
1150         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1151         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1152         struct qm_mcc_initfq opts = {0};
1153
1154         if (dpaa_push_mode_max_queue)
1155                 DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
1156                               "PUSH mode already enabled for first %d queues.\n"
1157                               "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
1158                               dpaa_push_mode_max_queue);
1159
1160         dpaa_poll_queue_default_config(&opts);
1161
1162         switch (queue_conf->ev.sched_type) {
1163         case RTE_SCHED_TYPE_ATOMIC:
1164                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
1165                 /* Reset the FQCTRL_AVOIDBLOCK bit, as it is an unnecessary
1166                  * configuration together with the HOLD_ACTIVE setting.
1167                  */
1168                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
1169                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
1170                 break;
1171         case RTE_SCHED_TYPE_ORDERED:
1172                 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
1173                 return -1;
1174         default:
1175                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
1176                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
1177                 break;
1178         }
1179
1180         opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
1181         opts.fqd.dest.channel = ch_id;
1182         opts.fqd.dest.wq = queue_conf->ev.priority;
1183
1184         if (dpaa_intf->cgr_rx) {
1185                 opts.we_mask |= QM_INITFQ_WE_CGID;
1186                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1187                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1188         }
1189
1190         flags = QMAN_INITFQ_FLAG_SCHED;
1191
1192         ret = qman_init_fq(rxq, flags, &opts);
1193         if (ret) {
1194                 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
1195                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
1196                 return ret;
1197         }
1198
1199         /* copy configuration which needs to be filled during dequeue */
1200         memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
1201         dev->data->rx_queues[eth_rx_queue_id] = rxq;
1202
1203         return ret;
1204 }
1205
1206 int
1207 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
1208                 int eth_rx_queue_id)
1209 {
1210         struct qm_mcc_initfq opts;
1211         int ret;
1212         u32 flags = 0;
1213         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1214         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
1215
1216         dpaa_poll_queue_default_config(&opts);
1217
1218         if (dpaa_intf->cgr_rx) {
1219                 opts.we_mask |= QM_INITFQ_WE_CGID;
1220                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
1221                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1222         }
1223
1224         ret = qman_init_fq(rxq, flags, &opts);
1225         if (ret) {
1226                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
1227                              rxq->fqid, ret);
1228         }
1229
1230         rxq->cb.dqrr_dpdk_cb = NULL;
1231         dev->data->rx_queues[eth_rx_queue_id] = NULL;
1232
1233         return 0;
1234 }
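/*
 * Detach re-initializes the frame queue with the default poll-mode
 * configuration from dpaa_poll_queue_default_config() and clears the
 * eventdev dequeue callback, returning the queue to plain poll mode.
 */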
1235
1236 static
1237 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
1238 {
1239         PMD_INIT_FUNC_TRACE();
1240 }
1241
1242 static
1243 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1244                             uint16_t nb_desc __rte_unused,
1245                 unsigned int socket_id __rte_unused,
1246                 const struct rte_eth_txconf *tx_conf)
1247 {
1248         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1249         struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
1250
1251         PMD_INIT_FUNC_TRACE();
1252
1253         /* Tx deferred start is not supported */
1254         if (tx_conf->tx_deferred_start) {
1255                 DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
1256                 return -EINVAL;
1257         }
1258         txq->nb_desc = UINT16_MAX;
1259         txq->offloads = tx_conf->offloads;
1260
1261         if (queue_idx >= dev->data->nb_tx_queues) {
1262                 rte_errno = EOVERFLOW;
1263                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
1264                       (void *)dev, queue_idx, dev->data->nb_tx_queues);
1265                 return -rte_errno;
1266         }
1267
1268         DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
1269                         queue_idx, txq->fqid);
1270         dev->data->tx_queues[queue_idx] = txq;
1271
1272         return 0;
1273 }
1274
1275 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
1276 {
1277         PMD_INIT_FUNC_TRACE();
1278 }
1279
1280 static uint32_t
1281 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1282 {
1283         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1284         struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
1285         u32 frm_cnt = 0;
1286
1287         PMD_INIT_FUNC_TRACE();
1288
1289         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
1290                 DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
1291                                rx_queue_id, frm_cnt);
1292         }
1293         return frm_cnt;
1294 }
1295
1296 static int dpaa_link_down(struct rte_eth_dev *dev)
1297 {
1298         struct fman_if *fif = dev->process_private;
1299         struct __fman_if *__fif;
1300
1301         PMD_INIT_FUNC_TRACE();
1302
1303         __fif = container_of(fif, struct __fman_if, __if);
1304
1305         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1306                 dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
1307         else
1308                 return dpaa_eth_dev_stop(dev);
1309         return 0;
1310 }
1311
1312 static int dpaa_link_up(struct rte_eth_dev *dev)
1313 {
1314         struct fman_if *fif = dev->process_private;
1315         struct __fman_if *__fif;
1316
1317         PMD_INIT_FUNC_TRACE();
1318
1319         __fif = container_of(fif, struct __fman_if, __if);
1320
1321         if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1322                 dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
1323         else
1324                 dpaa_eth_dev_start(dev);
1325         return 0;
1326 }
1327
1328 static int
1329 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
1330                    struct rte_eth_fc_conf *fc_conf)
1331 {
1332         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1333         struct rte_eth_fc_conf *net_fc;
1334
1335         PMD_INIT_FUNC_TRACE();
1336
1337         if (!(dpaa_intf->fc_conf)) {
1338                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1339                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1340                 if (!dpaa_intf->fc_conf) {
1341                         DPAA_PMD_ERR("unable to save flow control info");
1342                         return -ENOMEM;
1343                 }
1344         }
1345         net_fc = dpaa_intf->fc_conf;
1346
1347         if (fc_conf->high_water < fc_conf->low_water) {
1348                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
1349                 return -EINVAL;
1350         }
1351
1352         if (fc_conf->mode == RTE_FC_NONE) {
1353                 return 0;
1354         } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
1355                  fc_conf->mode == RTE_FC_FULL) {
1356                 fman_if_set_fc_threshold(dev->process_private,
1357                                          fc_conf->high_water,
1358                                          fc_conf->low_water,
1359                                          dpaa_intf->bp_info->bpid);
1360                 if (fc_conf->pause_time)
1361                         fman_if_set_fc_quanta(dev->process_private,
1362                                               fc_conf->pause_time);
1363         }
1364
1365         /* Save the information in dpaa device */
1366         net_fc->pause_time = fc_conf->pause_time;
1367         net_fc->high_water = fc_conf->high_water;
1368         net_fc->low_water = fc_conf->low_water;
1369         net_fc->send_xon = fc_conf->send_xon;
1370         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
1371         net_fc->mode = fc_conf->mode;
1372         net_fc->autoneg = fc_conf->autoneg;
1373
1374         return 0;
1375 }
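/*
 * Only Tx pause (RTE_FC_TX_PAUSE / RTE_FC_FULL) is programmed into the
 * hardware here, via the buffer-pool depletion thresholds; RTE_FC_NONE
 * returns without touching the MAC. For the pause modes the requested
 * settings are cached in dpaa_intf->fc_conf so that dpaa_flow_ctrl_get()
 * can report them back.
 */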
1376
1377 static int
1378 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
1379                    struct rte_eth_fc_conf *fc_conf)
1380 {
1381         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1382         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
1383         int ret;
1384
1385         PMD_INIT_FUNC_TRACE();
1386
1387         if (net_fc) {
1388                 fc_conf->pause_time = net_fc->pause_time;
1389                 fc_conf->high_water = net_fc->high_water;
1390                 fc_conf->low_water = net_fc->low_water;
1391                 fc_conf->send_xon = net_fc->send_xon;
1392                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
1393                 fc_conf->mode = net_fc->mode;
1394                 fc_conf->autoneg = net_fc->autoneg;
1395                 return 0;
1396         }
1397         ret = fman_if_get_fc_threshold(dev->process_private);
1398         if (ret) {
1399                 fc_conf->mode = RTE_FC_TX_PAUSE;
1400                 fc_conf->pause_time =
1401                         fman_if_get_fc_quanta(dev->process_private);
1402         } else {
1403                 fc_conf->mode = RTE_FC_NONE;
1404         }
1405
1406         return 0;
1407 }
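/*
 * Illustrative usage sketch (not part of the driver): applications reach
 * the two flow control callbacks above through the generic ethdev API.
 * The port id and the water-mark/pause values below are arbitrary example
 * values, not recommendations.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(0, &fc) == 0) {
 *		fc.mode = RTE_FC_TX_PAUSE;
 *		fc.high_water = 1024;
 *		fc.low_water = 512;
 *		fc.pause_time = 100;
 *		rte_eth_dev_flow_ctrl_set(0, &fc);
 *	}
 */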
1408
1409 static int
1410 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
1411                              struct rte_ether_addr *addr,
1412                              uint32_t index,
1413                              __rte_unused uint32_t pool)
1414 {
1415         int ret;
1416
1417         PMD_INIT_FUNC_TRACE();
1418
1419         ret = fman_if_add_mac_addr(dev->process_private,
1420                                    addr->addr_bytes, index);
1421
1422         if (ret)
1423                 DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
1424         return ret;
1425 }
1426
1427 static void
1428 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
1429                           uint32_t index)
1430 {
1431         PMD_INIT_FUNC_TRACE();
1432
1433         fman_if_clear_mac_addr(dev->process_private, index);
1434 }
1435
1436 static int
1437 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
1438                        struct rte_ether_addr *addr)
1439 {
1440         int ret;
1441
1442         PMD_INIT_FUNC_TRACE();
1443
1444         ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
1445         if (ret)
1446                 DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
1447
1448         return ret;
1449 }
1450
1451 static int
1452 dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
1453                          struct rte_eth_rss_conf *rss_conf)
1454 {
1455         struct rte_eth_dev_data *data = dev->data;
1456         struct rte_eth_conf *eth_conf = &data->dev_conf;
1457
1458         PMD_INIT_FUNC_TRACE();
1459
1460         if (!(default_q || fmc_q)) {
1461                 if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
1462                         DPAA_PMD_ERR("FM port configuration failed");
1463                         return -1;
1464                 }
1465                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1466         } else {
1467                 DPAA_PMD_ERR("RSS hash update not supported in FMC/default queue mode");
1468                 return -ENOTSUP;
1469         }
1470         return 0;
1471 }
1472
1473 static int
1474 dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1475                            struct rte_eth_rss_conf *rss_conf)
1476 {
1477         struct rte_eth_dev_data *data = dev->data;
1478         struct rte_eth_conf *eth_conf = &data->dev_conf;
1479
1480         /* DPAA does not support rss_key, so the length should be 0 */
1481         rss_conf->rss_key_len = 0;
1482         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1483         return 0;
1484 }
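/*
 * Illustrative usage sketch (not part of the driver): RSS reconfiguration
 * only succeeds in FMC-less mode (neither default_q nor fmc_q), and the
 * key is ignored, so only rss_hf matters. Port id 0 is an example value.
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(0, &conf);
 */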
1485
1486 static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
1487                                       uint16_t queue_id)
1488 {
1489         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1490         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1491
1492         if (!rxq->is_static)
1493                 return -EINVAL;
1494
1495         return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
1496 }
1497
1498 static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
1499                                        uint16_t queue_id)
1500 {
1501         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1502         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
1503         uint32_t temp;
1504         ssize_t temp1;
1505
1506         if (!rxq->is_static)
1507                 return -EINVAL;
1508
1509         qman_fq_portal_irqsource_remove(rxq->qp, ~0);
1510
1511         temp1 = read(rxq->q_fd, &temp, sizeof(temp));
1512         if (temp1 != sizeof(temp))
1513                 DPAA_PMD_ERR("irq read error");
1514
1515         qman_fq_portal_thread_irq(rxq->qp);
1516
1517         return 0;
1518 }
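/*
 * Illustrative usage sketch (not part of the driver): Rx interrupts only
 * work on queues set up in push (static) mode. A typical event loop
 * registers the queue's interrupt with an epoll instance, arms it, waits
 * and disarms it again; port 0 / queue 0 are example values.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(0, 0);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(0, 0);
 */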
1519
1520 static void
1521 dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1522         struct rte_eth_rxq_info *qinfo)
1523 {
1524         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1525         struct qman_fq *rxq;
1526         int ret;
1527
1528         rxq = dev->data->rx_queues[queue_id];
1529
1530         qinfo->mp = dpaa_intf->bp_info->mp;
1531         qinfo->scattered_rx = dev->data->scattered_rx;
1532         qinfo->nb_desc = rxq->nb_desc;
1533
1534         /* Report the HW Rx buffer length to user */
1535         ret = fman_if_get_maxfrm(dev->process_private);
1536         if (ret > 0)
1537                 qinfo->rx_buf_size = ret;
1538
1539         qinfo->conf.rx_free_thresh = 1;
1540         qinfo->conf.rx_drop_en = 1;
1541         qinfo->conf.rx_deferred_start = 0;
1542         qinfo->conf.offloads = rxq->offloads;
1543 }
1544
1545 static void
1546 dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1547         struct rte_eth_txq_info *qinfo)
1548 {
1549         struct qman_fq *txq;
1550
1551         txq = dev->data->tx_queues[queue_id];
1552
1553         qinfo->nb_desc = txq->nb_desc;
1554         qinfo->conf.tx_thresh.pthresh = 0;
1555         qinfo->conf.tx_thresh.hthresh = 0;
1556         qinfo->conf.tx_thresh.wthresh = 0;
1557
1558         qinfo->conf.tx_free_thresh = 0;
1559         qinfo->conf.tx_rs_thresh = 0;
1560         qinfo->conf.offloads = txq->offloads;
1561         qinfo->conf.tx_deferred_start = 0;
1562 }
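/*
 * Illustrative usage sketch (not part of the driver): the two info
 * callbacks above are reached via the generic ethdev queries; port and
 * queue ids are example values.
 *
 *	struct rte_eth_rxq_info rx_info;
 *	struct rte_eth_txq_info tx_info;
 *
 *	rte_eth_rx_queue_info_get(0, 0, &rx_info);
 *	rte_eth_tx_queue_info_get(0, 0, &tx_info);
 */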
1563
1564 static struct eth_dev_ops dpaa_devops = {
1565         .dev_configure            = dpaa_eth_dev_configure,
1566         .dev_start                = dpaa_eth_dev_start,
1567         .dev_stop                 = dpaa_eth_dev_stop,
1568         .dev_close                = dpaa_eth_dev_close,
1569         .dev_infos_get            = dpaa_eth_dev_info,
1570         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
1571
1572         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
1573         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
1574         .rx_queue_release         = dpaa_eth_rx_queue_release,
1575         .tx_queue_release         = dpaa_eth_tx_queue_release,
1576         .rx_burst_mode_get        = dpaa_dev_rx_burst_mode_get,
1577         .tx_burst_mode_get        = dpaa_dev_tx_burst_mode_get,
1578         .rxq_info_get             = dpaa_rxq_info_get,
1579         .txq_info_get             = dpaa_txq_info_get,
1580
1581         .flow_ctrl_get            = dpaa_flow_ctrl_get,
1582         .flow_ctrl_set            = dpaa_flow_ctrl_set,
1583
1584         .link_update              = dpaa_eth_link_update,
1585         .stats_get                = dpaa_eth_stats_get,
1586         .xstats_get               = dpaa_dev_xstats_get,
1587         .xstats_get_by_id         = dpaa_xstats_get_by_id,
1588         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
1589         .xstats_get_names         = dpaa_xstats_get_names,
1590         .xstats_reset             = dpaa_eth_stats_reset,
1591         .stats_reset              = dpaa_eth_stats_reset,
1592         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
1593         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
1594         .allmulticast_enable      = dpaa_eth_multicast_enable,
1595         .allmulticast_disable     = dpaa_eth_multicast_disable,
1596         .mtu_set                  = dpaa_mtu_set,
1597         .dev_set_link_down        = dpaa_link_down,
1598         .dev_set_link_up          = dpaa_link_up,
1599         .mac_addr_add             = dpaa_dev_add_mac_addr,
1600         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
1601         .mac_addr_set             = dpaa_dev_set_mac_addr,
1602
1603         .fw_version_get           = dpaa_fw_version_get,
1604
1605         .rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
1606         .rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
1607         .rss_hash_update          = dpaa_dev_rss_hash_update,
1608         .rss_hash_conf_get        = dpaa_dev_rss_hash_conf_get,
1609 };
1610
1611 static bool
1612 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1613 {
1614         if (strcmp(dev->device->driver->name,
1615                    drv->driver.name))
1616                 return false;
1617
1618         return true;
1619 }
1620
1621 static bool
1622 is_dpaa_supported(struct rte_eth_dev *dev)
1623 {
1624         return is_device_supported(dev, &rte_dpaa_pmd);
1625 }
1626
1627 int
1628 rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
1629 {
1630         struct rte_eth_dev *dev;
1631
1632         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1633
1634         dev = &rte_eth_devices[port];
1635
1636         if (!is_dpaa_supported(dev))
1637                 return -ENOTSUP;
1638
1639         if (on)
1640                 fman_if_loopback_enable(dev->process_private);
1641         else
1642                 fman_if_loopback_disable(dev->process_private);
1643
1644         return 0;
1645 }
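/*
 * Illustrative usage sketch (not part of the driver): this PMD-specific
 * API is declared in rte_pmd_dpaa.h and lets a test application reflect
 * transmitted frames back to the Rx side of the same port.
 *
 *	rte_pmd_dpaa_set_tx_loopback(port_id, 1);
 *	...
 *	rte_pmd_dpaa_set_tx_loopback(port_id, 0);
 */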
1646
1647 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
1648                                struct fman_if *fman_intf)
1649 {
1650         struct rte_eth_fc_conf *fc_conf;
1651         int ret;
1652
1653         PMD_INIT_FUNC_TRACE();
1654
1655         if (!(dpaa_intf->fc_conf)) {
1656                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1657                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1658                 if (!dpaa_intf->fc_conf) {
1659                         DPAA_PMD_ERR("unable to save flow control info");
1660                         return -ENOMEM;
1661                 }
1662         }
1663         fc_conf = dpaa_intf->fc_conf;
1664         ret = fman_if_get_fc_threshold(fman_intf);
1665         if (ret) {
1666                 fc_conf->mode = RTE_FC_TX_PAUSE;
1667                 fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
1668         } else {
1669                 fc_conf->mode = RTE_FC_NONE;
1670         }
1671
1672         return 0;
1673 }
1674
1675 /* Initialise an Rx FQ */
1676 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1677                               uint32_t fqid)
1678 {
1679         struct qm_mcc_initfq opts = {0};
1680         int ret;
1681         u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
1682         struct qm_mcc_initcgr cgr_opts = {
1683                 .we_mask = QM_CGR_WE_CS_THRES |
1684                                 QM_CGR_WE_CSTD_EN |
1685                                 QM_CGR_WE_MODE,
1686                 .cgr = {
1687                         .cstd_en = QM_CGR_EN,
1688                         .mode = QMAN_CGR_MODE_FRAME
1689                 }
1690         };
1691
1692         if (fmc_q || default_q) {
1693                 ret = qman_reserve_fqid(fqid);
1694                 if (ret) {
1695                         DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
1696                                      fqid, ret);
1697                         return -EINVAL;
1698                 }
1699         }
1700
1701         DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1702         ret = qman_create_fq(fqid, flags, fq);
1703         if (ret) {
1704                 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
1705                         fqid, ret);
1706                 return ret;
1707         }
1708         fq->is_static = false;
1709
1710         dpaa_poll_queue_default_config(&opts);
1711
1712         if (cgr_rx) {
1713                 /* Enable tail drop with cgr on this queue */
1714                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1715                 cgr_rx->cb = NULL;
1716                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1717                                       &cgr_opts);
1718                 if (ret) {
1719                         DPAA_PMD_WARN(
1720                                 "rx taildrop init fail on rx fqid 0x%x (ret=%d)",
1721                                 fq->fqid, ret);
1722                         goto without_cgr;
1723                 }
1724                 opts.we_mask |= QM_INITFQ_WE_CGID;
1725                 opts.fqd.cgid = cgr_rx->cgrid;
1726                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1727         }
1728 without_cgr:
1729         ret = qman_init_fq(fq, 0, &opts);
1730         if (ret)
1731                 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
1732         return ret;
1733 }
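/*
 * Note on the CGR-based tail drop above: QMAN_CGR_MODE_FRAME makes the
 * congestion state threshold count frames, so the queue starts dropping
 * once roughly td_threshold frames are outstanding. A minimal sketch of
 * creating such a CGR on its own (cgrid and the 1024-frame threshold are
 * example values):
 *
 *	struct qman_cgr cgr = { .cgrid = cgrid, .cb = NULL };
 *	struct qm_mcc_initcgr opts = {
 *		.we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSTD_EN |
 *			   QM_CGR_WE_MODE,
 *		.cgr = {
 *			.cstd_en = QM_CGR_EN,
 *			.mode = QMAN_CGR_MODE_FRAME
 *		}
 *	};
 *
 *	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 1024, 0);
 *	qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */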
1734
1735 /* Initialise a Tx FQ */
1736 static int dpaa_tx_queue_init(struct qman_fq *fq,
1737                               struct fman_if *fman_intf,
1738                               struct qman_cgr *cgr_tx)
1739 {
1740         struct qm_mcc_initfq opts = {0};
1741         struct qm_mcc_initcgr cgr_opts = {
1742                 .we_mask = QM_CGR_WE_CS_THRES |
1743                                 QM_CGR_WE_CSTD_EN |
1744                                 QM_CGR_WE_MODE,
1745                 .cgr = {
1746                         .cstd_en = QM_CGR_EN,
1747                         .mode = QMAN_CGR_MODE_FRAME
1748                 }
1749         };
1750         int ret;
1751
1752         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1753                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1754         if (ret) {
1755                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1756                 return ret;
1757         }
1758         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1759                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1760         opts.fqd.dest.channel = fman_intf->tx_channel_id;
1761         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1762         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1763         opts.fqd.context_b = 0;
1764         /* no tx-confirmation */
1765         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1766         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1767         DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1768
1769         if (cgr_tx) {
1770                 /* Enable tail drop with cgr on this queue */
1771                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
1772                                       td_tx_threshold, 0);
1773                 cgr_tx->cb = NULL;
1774                 ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
1775                                       &cgr_opts);
1776                 if (ret) {
1777                         DPAA_PMD_WARN(
1778                                 "tx taildrop init fail on tx fqid 0x%x (ret=%d)",
1779                                 fq->fqid, ret);
1780                         goto without_cgr;
1781                 }
1782                 opts.we_mask |= QM_INITFQ_WE_CGID;
1783                 opts.fqd.cgid = cgr_tx->cgrid;
1784                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1785                 DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
1786                                 td_tx_threshold);
1787         }
1788 without_cgr:
1789         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1790         if (ret)
1791                 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
1792         return ret;
1793 }
1794
1795 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1796 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1797 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1798 {
1799         struct qm_mcc_initfq opts = {0};
1800         int ret;
1801
1802         PMD_INIT_FUNC_TRACE();
1803
1804         ret = qman_reserve_fqid(fqid);
1805         if (ret) {
1806                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1807                         fqid, ret);
1808                 return -EINVAL;
1809         }
1810         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
1811         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1812         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1813         if (ret) {
1814                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1815                         fqid, ret);
1816                 return ret;
1817         }
1818         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1819         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1820         ret = qman_init_fq(fq, 0, &opts);
1821         if (ret)
1822                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1823                             fqid, ret);
1824         return ret;
1825 }
1826 #endif
1827
1828 /* Initialise a network interface */
1829 static int
1830 dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
1831 {
1832         struct rte_dpaa_device *dpaa_device;
1833         struct fm_eth_port_cfg *cfg;
1834         struct dpaa_if *dpaa_intf;
1835         struct fman_if *fman_intf;
1836         int dev_id;
1837
1838         PMD_INIT_FUNC_TRACE();
1839
1840         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1841         dev_id = dpaa_device->id.dev_id;
1842         cfg = dpaa_get_eth_port_cfg(dev_id);
1843         fman_intf = cfg->fman_if;
1844         eth_dev->process_private = fman_intf;
1845
1846         /* Plugging of UCODE burst API not supported in Secondary */
1847         dpaa_intf = eth_dev->data->dev_private;
1848         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1849         if (dpaa_intf->cgr_tx)
1850                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
1851         else
1852                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
1853 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1854         qman_set_fq_lookup_table(
1855                 dpaa_intf->rx_queues->qman_fq_lookup_table);
1856 #endif
1857
1858         return 0;
1859 }
1860
1861 /* Initialise a network interface */
1862 static int
1863 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1864 {
1865         int num_rx_fqs, fqid;
1866         int loop, ret = 0;
1867         int dev_id;
1868         struct rte_dpaa_device *dpaa_device;
1869         struct dpaa_if *dpaa_intf;
1870         struct fm_eth_port_cfg *cfg;
1871         struct fman_if *fman_intf;
1872         struct fman_if_bpool *bp, *tmp_bp;
1873         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1874         uint32_t cgrid_tx[MAX_DPAA_CORES];
1875         uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
1876         int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
1877         int8_t vsp_id = -1;
1878
1879         PMD_INIT_FUNC_TRACE();
1880
1881         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1882         dev_id = dpaa_device->id.dev_id;
1883         dpaa_intf = eth_dev->data->dev_private;
1884         cfg = dpaa_get_eth_port_cfg(dev_id);
1885         fman_intf = cfg->fman_if;
1886
1887         dpaa_intf->name = dpaa_device->name;
1888
1889         /* save fman_if & cfg in the interface structure */
1890         eth_dev->process_private = fman_intf;
1891         dpaa_intf->ifid = dev_id;
1892         dpaa_intf->cfg = cfg;
1893
1894         memset((char *)dev_rx_fqids, 0,
1895                 sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
1896
1897         memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
1898
1899         /* Initialize Rx FQ's */
1900         if (default_q) {
1901                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1902         } else if (fmc_q) {
1903                 num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
1904                                                 dev_vspids,
1905                                                 DPAA_MAX_NUM_PCD_QUEUES);
1906                 if (num_rx_fqs < 0) {
1907                         DPAA_PMD_ERR("%s FMC initialization failed!", dpaa_intf->name);
1908                         ret = num_rx_fqs;
1909                         goto free_rx;
1910                 }
1911                 if (!num_rx_fqs) {
1912                         DPAA_PMD_WARN("%s is not configured by FMC.",
1913                                 dpaa_intf->name);
1914                 }
1915         } else {
1916                 /* FMCLESS mode, load balance to multiple cores. */
1917                 num_rx_fqs = rte_lcore_count();
1918         }
1919
1920         /* Each device cannot have more than DPAA_MAX_NUM_PCD_QUEUES RX
1921          * queues.
1922          */
1923         if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1924                 DPAA_PMD_ERR("Invalid number of RX queues\n");
1925                 return -EINVAL;
1926         }
1927
1928         if (num_rx_fqs > 0) {
1929                 dpaa_intf->rx_queues = rte_zmalloc(NULL,
1930                         sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1931                 if (!dpaa_intf->rx_queues) {
1932                         DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1933                         return -ENOMEM;
1934                 }
1935         } else {
1936                 dpaa_intf->rx_queues = NULL;
1937         }
1938
1939         memset(cgrid, 0, sizeof(cgrid));
1940         memset(cgrid_tx, 0, sizeof(cgrid_tx));
1941
1942         /* if DPAA_TX_TAILDROP_THRESHOLD is set, use that value; if 0, it means
1943          * Tx tail drop is disabled.
1944          */
1945         if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
1946                 td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
1947                 DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
1948                                td_tx_threshold);
1949                 /* if a very large value is being configured */
1950                 if (td_tx_threshold > UINT16_MAX)
1951                         td_tx_threshold = CGR_RX_PERFQ_THRESH;
1952         }
1953
1954         /* If congestion control is enabled globally */
1955         if (num_rx_fqs > 0 && td_threshold) {
1956                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1957                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1958                 if (!dpaa_intf->cgr_rx) {
1959                         DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1960                         ret = -ENOMEM;
1961                         goto free_rx;
1962                 }
1963
1964                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1965                 if (ret != num_rx_fqs) {
1966                         DPAA_PMD_WARN("insufficient CGRIDs available");
1967                         ret = -EINVAL;
1968                         goto free_rx;
1969                 }
1970         } else {
1971                 dpaa_intf->cgr_rx = NULL;
1972         }
1973
1974         if (!fmc_q && !default_q) {
1975                 ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
1976                                             num_rx_fqs, 0);
1977                 if (ret < 0) {
1978                         DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
1979                         goto free_rx;
1980                 }
1981         }
1982
1983         for (loop = 0; loop < num_rx_fqs; loop++) {
1984                 if (default_q)
1985                         fqid = cfg->rx_def;
1986                 else
1987                         fqid = dev_rx_fqids[loop];
1988
1989                 vsp_id = dev_vspids[loop];
1990
1991                 if (dpaa_intf->cgr_rx)
1992                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1993
1994                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1995                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1996                         fqid);
1997                 if (ret)
1998                         goto free_rx;
1999                 dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
2000                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
2001         }
2002         dpaa_intf->nb_rx_queues = num_rx_fqs;
2003
2004         /* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
2005         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
2006                 MAX_DPAA_CORES, MAX_CACHELINE);
2007         if (!dpaa_intf->tx_queues) {
2008                 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
2009                 ret = -ENOMEM;
2010                 goto free_rx;
2011         }
2012
2013         /* If congestion control is enabled globally */
2014         if (td_tx_threshold) {
2015                 dpaa_intf->cgr_tx = rte_zmalloc(NULL,
2016                         sizeof(struct qman_cgr) * MAX_DPAA_CORES,
2017                         MAX_CACHELINE);
2018                 if (!dpaa_intf->cgr_tx) {
2019                         DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
2020                         ret = -ENOMEM;
2021                         goto free_rx;
2022                 }
2023
2024                 ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
2025                                              1, 0);
2026                 if (ret != MAX_DPAA_CORES) {
2027                         DPAA_PMD_WARN("insufficient CGRIDs available");
2028                         ret = -EINVAL;
2029                         goto free_rx;
2030                 }
2031         } else {
2032                 dpaa_intf->cgr_tx = NULL;
2033         }
2034
2035
2036         for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
2037                 if (dpaa_intf->cgr_tx)
2038                         dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
2039
2040                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
2041                         fman_intf,
2042                         dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
2043                 if (ret)
2044                         goto free_tx;
2045                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
2046         }
2047         dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
2048
2049 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
2050         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2051                         [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
2052         if (ret) {
2053                 DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
2054                 goto free_tx;
2055         }
2056         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
2057         ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
2058                         [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
2059         if (ret) {
2060                 DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
2061                 goto free_tx;
2062         }
2063         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
2064 #endif
2065
2066         DPAA_PMD_DEBUG("All frame queues created");
2067
2068         /* Get the initial configuration for flow control */
2069         dpaa_fc_set_default(dpaa_intf, fman_intf);
2070
2071         /* reset bpool list, initialize bpool dynamically */
2072         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
2073                 list_del(&bp->node);
2074                 rte_free(bp);
2075         }
2076
2077         /* Populate ethdev structure */
2078         eth_dev->dev_ops = &dpaa_devops;
2079         eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
2080         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
2081         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
2082
2083         /* Allocate memory for storing MAC addresses */
2084         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
2085                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
2086         if (eth_dev->data->mac_addrs == NULL) {
2087                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
2088                                                 "store MAC addresses",
2089                                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
2090                 ret = -ENOMEM;
2091                 goto free_tx;
2092         }
2093
2094         /* copy the primary mac address */
2095         rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
2096
2097         RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
2098                 dpaa_device->name,
2099                 fman_intf->mac_addr.addr_bytes[0],
2100                 fman_intf->mac_addr.addr_bytes[1],
2101                 fman_intf->mac_addr.addr_bytes[2],
2102                 fman_intf->mac_addr.addr_bytes[3],
2103                 fman_intf->mac_addr.addr_bytes[4],
2104                 fman_intf->mac_addr.addr_bytes[5]);
2105
2106         if (!fman_intf->is_shared_mac) {
2107                 /* Configure error packet handling */
2108                 fman_if_receive_rx_errors(fman_intf,
2109                         FM_FD_RX_STATUS_ERR_MASK);
2110                 /* Disable RX mode */
2111                 fman_if_disable_rx(fman_intf);
2112                 /* Disable promiscuous mode */
2113                 fman_if_promiscuous_disable(fman_intf);
2114                 /* Disable multicast */
2115                 fman_if_reset_mcast_filter_table(fman_intf);
2116                 /* Reset interface statistics */
2117                 fman_if_stats_reset(fman_intf);
2118                 /* Disable SG by default */
2119                 fman_if_set_sg(fman_intf, 0);
2120                 fman_if_set_maxfrm(fman_intf,
2121                                    RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
2122         }
2123
2124         return 0;
2125
2126 free_tx:
2127         rte_free(dpaa_intf->tx_queues);
2128         dpaa_intf->tx_queues = NULL;
2129         dpaa_intf->nb_tx_queues = 0;
2130
2131 free_rx:
2132         rte_free(dpaa_intf->cgr_rx);
2133         rte_free(dpaa_intf->cgr_tx);
2134         rte_free(dpaa_intf->rx_queues);
2135         dpaa_intf->rx_queues = NULL;
2136         dpaa_intf->nb_rx_queues = 0;
2137         return ret;
2138 }
2139
2140 static int
2141 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
2142                struct rte_dpaa_device *dpaa_dev)
2143 {
2144         int diag;
2145         int ret;
2146         struct rte_eth_dev *eth_dev;
2147
2148         PMD_INIT_FUNC_TRACE();
2149
2150         if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
2151                 RTE_PKTMBUF_HEADROOM) {
2152                 DPAA_PMD_ERR(
2153                 "RTE_PKTMBUF_HEADROOM(%d) shall be >= DPAA Annotation req(%d)",
2154                 RTE_PKTMBUF_HEADROOM,
2155                 DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
2156
2157                 return -1;
2158         }
2159
2160         /* In case of secondary process, the device is already configured
2161          * and no further action is required, except portal initialization
2162          * and verifying secondary attachment to port name.
2163          */
2164         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2165                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
2166                 if (!eth_dev)
2167                         return -ENOMEM;
2168                 eth_dev->device = &dpaa_dev->device;
2169                 eth_dev->dev_ops = &dpaa_devops;
2170
2171                 ret = dpaa_dev_init_secondary(eth_dev);
2172                 if (ret != 0) {
2173                         RTE_LOG(ERR, PMD, "secondary dev init failed\n");
2174                         return ret;
2175                 }
2176
2177                 rte_eth_dev_probing_finish(eth_dev);
2178                 return 0;
2179         }
2180
2181         if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
2182                 if (access("/tmp/fmc.bin", F_OK) == -1) {
2183                         DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
2184                         default_q = 1;
2185                 }
2186
2187                 if (!(default_q || fmc_q)) {
2188                         if (dpaa_fm_init()) {
2189                                 DPAA_PMD_ERR("FM init failed\n");
2190                                 return -1;
2191                         }
2192                 }
2193
2194                 /* disabling the default push mode for LS1043 */
2195                 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
2196                         dpaa_push_mode_max_queue = 0;
2197
2198                 /* Check if push mode queues are to be enabled. Currently we
2199                  * allow only one queue per thread.
2200                  */
2201                 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
2202                         dpaa_push_mode_max_queue =
2203                                         atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
2204                         if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
2205                                 dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
2206                 }
2207
2208                 is_global_init = 1;
2209         }
2210
2211         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2212                 ret = rte_dpaa_portal_init((void *)1);
2213                 if (ret) {
2214                         DPAA_PMD_ERR("Unable to initialize portal");
2215                         return ret;
2216                 }
2217         }
2218
2219         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
2220         if (!eth_dev)
2221                 return -ENOMEM;
2222
2223         eth_dev->data->dev_private =
2224                         rte_zmalloc("ethdev private structure",
2225                                         sizeof(struct dpaa_if),
2226                                         RTE_CACHE_LINE_SIZE);
2227         if (!eth_dev->data->dev_private) {
2228                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
2229                 rte_eth_dev_release_port(eth_dev);
2230                 return -ENOMEM;
2231         }
2232
2233         eth_dev->device = &dpaa_dev->device;
2234         dpaa_dev->eth_dev = eth_dev;
2235
2236         qman_ern_register_cb(dpaa_free_mbuf);
2237
2238         if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
2239                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2240
2241         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2242
2243         /* Invoke PMD device initialization function */
2244         diag = dpaa_dev_init(eth_dev);
2245         if (diag == 0) {
2246                 rte_eth_dev_probing_finish(eth_dev);
2247                 return 0;
2248         }
2249
2250         rte_eth_dev_release_port(eth_dev);
2251         return diag;
2252 }
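/*
 * Illustrative sketch (not part of the driver): the two tuning knobs read
 * via getenv() in this file are plain environment variables, so they can
 * be exported before launching any DPDK application; the values below are
 * examples only.
 *
 *	export DPAA_PUSH_QUEUES_NUMBER=4
 *	export DPAA_TX_TAILDROP_THRESHOLD=512
 */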
2253
2254 static int
2255 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
2256 {
2257         struct rte_eth_dev *eth_dev;
2258         int ret;
2259
2260         PMD_INIT_FUNC_TRACE();
2261
2262         eth_dev = dpaa_dev->eth_dev;
2263         dpaa_eth_dev_close(eth_dev);
2264         ret = rte_eth_dev_release_port(eth_dev);
2265
2266         return ret;
2267 }
2268
2269 static void __attribute__((destructor(102))) dpaa_finish(void)
2270 {
2271         /* For secondary, primary will do all the cleanup */
2272         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2273                 return;
2274
2275         if (!(default_q || fmc_q)) {
2276                 unsigned int i;
2277
2278                 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
2279                         if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
2280                                 struct rte_eth_dev *dev = &rte_eth_devices[i];
2281                                 struct dpaa_if *dpaa_intf =
2282                                         dev->data->dev_private;
2283                                 struct fman_if *fif =
2284                                         dev->process_private;
2285                                 if (dpaa_intf->port_handle)
2286                                         if (dpaa_fm_deconfig(dpaa_intf, fif))
2287                                                 DPAA_PMD_WARN("DPAA FM "
2288                                                         "deconfig failed\n");
2289                                 if (fif->num_profiles) {
2290                                         if (dpaa_port_vsp_cleanup(dpaa_intf,
2291                                                                   fif))
2292                                                 DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
2293                                 }
2294                         }
2295                 }
2296                 if (is_global_init)
2297                         if (dpaa_fm_term())
2298                                 DPAA_PMD_WARN("DPAA FM term failed\n");
2299
2300                 is_global_init = 0;
2301
2302                 DPAA_PMD_INFO("DPAA fman cleaned up");
2303         }
2304 }
2305
2306 static struct rte_dpaa_driver rte_dpaa_pmd = {
2307         .drv_flags = RTE_DPAA_DRV_INTR_LSC,
2308         .drv_type = FSL_DPAA_ETH,
2309         .probe = rte_dpaa_probe,
2310         .remove = rte_dpaa_remove,
2311 };
2312
2313 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
2314 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);