bus/dpaa: decouple FQ portal alloc and init
[dpdk.git] drivers/net/dpaa/dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_string_fns.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_interrupts.h>
22 #include <rte_log.h>
23 #include <rte_debug.h>
24 #include <rte_pci.h>
25 #include <rte_atomic.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_memory.h>
28 #include <rte_tailq.h>
29 #include <rte_eal.h>
30 #include <rte_alarm.h>
31 #include <rte_ether.h>
32 #include <rte_ethdev_driver.h>
33 #include <rte_malloc.h>
34 #include <rte_ring.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38 #include <dpaa_mempool.h>
39
40 #include <dpaa_ethdev.h>
41 #include <dpaa_rxtx.h>
42 #include <rte_pmd_dpaa.h>
43
44 #include <fsl_usd.h>
45 #include <fsl_qman.h>
46 #include <fsl_bman.h>
47 #include <fsl_fman.h>
48
49 /* Supported Rx offloads */
50 static uint64_t dev_rx_offloads_sup =
51                 DEV_RX_OFFLOAD_JUMBO_FRAME |
52                 DEV_RX_OFFLOAD_SCATTER;
53
54 /* Rx offloads which cannot be disabled */
55 static uint64_t dev_rx_offloads_nodis =
56                 DEV_RX_OFFLOAD_IPV4_CKSUM |
57                 DEV_RX_OFFLOAD_UDP_CKSUM |
58                 DEV_RX_OFFLOAD_TCP_CKSUM |
59                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
60
61 /* Supported Tx offloads */
62 static uint64_t dev_tx_offloads_sup =
63                 DEV_TX_OFFLOAD_MT_LOCKFREE |
64                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
65
66 /* Tx offloads which cannot be disabled */
67 static uint64_t dev_tx_offloads_nodis =
68                 DEV_TX_OFFLOAD_IPV4_CKSUM |
69                 DEV_TX_OFFLOAD_UDP_CKSUM |
70                 DEV_TX_OFFLOAD_TCP_CKSUM |
71                 DEV_TX_OFFLOAD_SCTP_CKSUM |
72                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
73                 DEV_TX_OFFLOAD_MULTI_SEGS;
74
75 /* Keep track of whether QMAN and BMAN have been globally initialized */
76 static int is_global_init;
77 static int default_q;   /* use default queue - FMC is not executed */
78 /* By default we allow up to 4 push-mode queues, as each such queue needs
79  * a dedicated portal and we are short of portals.
80  */
81 #define DPAA_MAX_PUSH_MODE_QUEUE       8
82 #define DPAA_DEFAULT_PUSH_MODE_QUEUE   4
83
84 static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
85 static int dpaa_push_queue_idx; /* Index of the next queue to be used in push mode */
86
87
88 /* Per FQ Taildrop in frame count */
89 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
90
91 struct rte_dpaa_xstats_name_off {
92         char name[RTE_ETH_XSTATS_NAME_SIZE];
93         uint32_t offset;
94 };
95
96 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
97         {"rx_align_err",
98                 offsetof(struct dpaa_if_stats, raln)},
99         {"rx_valid_pause",
100                 offsetof(struct dpaa_if_stats, rxpf)},
101         {"rx_fcs_err",
102                 offsetof(struct dpaa_if_stats, rfcs)},
103         {"rx_vlan_frame",
104                 offsetof(struct dpaa_if_stats, rvlan)},
105         {"rx_frame_err",
106                 offsetof(struct dpaa_if_stats, rerr)},
107         {"rx_drop_err",
108                 offsetof(struct dpaa_if_stats, rdrp)},
109         {"rx_undersized",
110                 offsetof(struct dpaa_if_stats, rund)},
111         {"rx_oversize_err",
112                 offsetof(struct dpaa_if_stats, rovr)},
113         {"rx_fragment_pkt",
114                 offsetof(struct dpaa_if_stats, rfrg)},
115         {"tx_valid_pause",
116                 offsetof(struct dpaa_if_stats, txpf)},
117         {"tx_fcs_err",
118                 offsetof(struct dpaa_if_stats, terr)},
119         {"tx_vlan_frame",
120                 offsetof(struct dpaa_if_stats, tvlan)},
121         {"tx_undersized",
122                 offsetof(struct dpaa_if_stats, tund)},
123 };
124
125 static struct rte_dpaa_driver rte_dpaa_pmd;
126
127 static int
128 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
129
130 static inline void
131 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
132 {
133         memset(opts, 0, sizeof(struct qm_mcc_initfq));
134         opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
135         opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
136                            QM_FQCTRL_PREFERINCACHE;
137         opts->fqd.context_a.stashing.exclusive = 0;
138         if (dpaa_svr_family != SVR_LS1046A_FAMILY)
139                 opts->fqd.context_a.stashing.annotation_cl =
140                                                 DPAA_IF_RX_ANNOTATION_STASH;
141         opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
142         opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
143 }
144
145 static int
146 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
147 {
148         struct dpaa_if *dpaa_intf = dev->data->dev_private;
149         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
150                                 + VLAN_TAG_SIZE;
151         uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
152
153         PMD_INIT_FUNC_TRACE();
154
155         if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
156                 return -EINVAL;
157         /*
158          * Refuse mtu that requires the support of scattered packets
159          * when this feature has not been enabled before.
160          */
161         if (dev->data->min_rx_buf_size &&
162                 !dev->data->scattered_rx && frame_size > buffsz) {
163                 DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
164                 return -EINVAL;
165         }
166
167         /* check <seg size> * <max_seg>  >= max_frame */
168         if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
169                 (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
170                 DPAA_PMD_ERR("Too big to fit for Max SG list %d",
171                                 buffsz * DPAA_SGT_MAX_ENTRIES);
172                 return -EINVAL;
173         }
174
175         if (frame_size > RTE_ETHER_MAX_LEN)
176                 dev->data->dev_conf.rxmode.offloads |=
177                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
178         else
179                 dev->data->dev_conf.rxmode.offloads &=
180                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
181
182         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
183
184         fman_if_set_maxfrm(dpaa_intf->fif, frame_size);
185
186         return 0;
187 }
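/*
 * Editorial note (not part of the original source): dpaa_mtu_set() is the
 * backend of rte_eth_dev_set_mtu(). A minimal app-side sketch, assuming a
 * DPDK release contemporary with this file; app_set_mtu() and port_id are
 * hypothetical. The frame size implied by an MTU follows the same
 * arithmetic as above: MTU + Ethernet header + CRC + one VLAN tag.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int app_set_mtu(uint16_t port_id, uint16_t mtu)
 *	{
 *		// 14-byte L2 header, 4-byte CRC, 4-byte VLAN tag
 *		uint32_t frame = mtu + RTE_ETHER_HDR_LEN +
 *				 RTE_ETHER_CRC_LEN + 4;
 *
 *		printf("requesting %u-byte frames\n", frame);
 *		return rte_eth_dev_set_mtu(port_id, mtu);
 *	}
 */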
188
189 static int
190 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
191 {
192         struct dpaa_if *dpaa_intf = dev->data->dev_private;
193         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
194         uint64_t rx_offloads = eth_conf->rxmode.offloads;
195         uint64_t tx_offloads = eth_conf->txmode.offloads;
196
197         PMD_INIT_FUNC_TRACE();
198
199         /* Rx offloads which are enabled by default */
200         if (dev_rx_offloads_nodis & ~rx_offloads) {
201                 DPAA_PMD_INFO(
202                 "Some Rx offloads are enabled by default - requested 0x%" PRIx64
203                 " always-enabled are 0x%" PRIx64,
204                 rx_offloads, dev_rx_offloads_nodis);
205         }
206
207         /* Tx offloads which are enabled by default */
208         if (dev_tx_offloads_nodis & ~tx_offloads) {
209                 DPAA_PMD_INFO(
210                 "Some Tx offloads are enabled by default - requested 0x%" PRIx64
211                 " always-enabled are 0x%" PRIx64,
212                 tx_offloads, dev_tx_offloads_nodis);
213         }
214
215         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
216                 uint32_t max_len;
217
218                 DPAA_PMD_DEBUG("enabling jumbo");
219
220                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
221                     DPAA_MAX_RX_PKT_LEN)
222                         max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
223                 else {
224                         DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
225                                 "supported is %d",
226                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
227                                 DPAA_MAX_RX_PKT_LEN);
228                         max_len = DPAA_MAX_RX_PKT_LEN;
229                 }
230
231                 fman_if_set_maxfrm(dpaa_intf->fif, max_len);
232                 dev->data->mtu = max_len
233                         - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
234         }
235
236         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
237                 DPAA_PMD_DEBUG("enabling scatter mode");
238                 fman_if_set_sg(dpaa_intf->fif, 1);
239                 dev->data->scattered_rx = 1;
240         }
241
242         return 0;
243 }
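/*
 * Editorial sketch (not part of the original source): application-side
 * configuration that exercises the jumbo and scatter branches above.
 * Assumes a release that still has DEV_RX_OFFLOAD_JUMBO_FRAME; port_id,
 * the queue counts and the 9000-byte length are illustrative:
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
 *			       DEV_RX_OFFLOAD_SCATTER;
 *	conf.rxmode.max_rx_pkt_len = 9000;	// capped to DPAA_MAX_RX_PKT_LEN
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		return -1;
 */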
244
245 static const uint32_t *
246 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
247 {
248         static const uint32_t ptypes[] = {
249                 RTE_PTYPE_L2_ETHER,
250                 RTE_PTYPE_L2_ETHER_VLAN,
251                 RTE_PTYPE_L2_ETHER_ARP,
252                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
253                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
254                 RTE_PTYPE_L4_ICMP,
255                 RTE_PTYPE_L4_TCP,
256                 RTE_PTYPE_L4_UDP,
257                 RTE_PTYPE_L4_FRAG,
260                 RTE_PTYPE_L4_SCTP
261         };
262
263         PMD_INIT_FUNC_TRACE();
264
265         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
266                 return ptypes;
267         return NULL;
268 }
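/*
 * Editorial sketch (not part of the original source): the table above is
 * reported through rte_eth_dev_get_supported_ptypes(). A hypothetical
 * app-side query in the usual two-call style (first call with NULL to
 * size the array):
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_ALL_MASK, NULL, 0);
 *	if (n > 0) {
 *		uint32_t ptypes[n];
 *
 *		n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_ALL_MASK, ptypes, n);
 *	}
 */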
269
270 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
271 {
272         struct dpaa_if *dpaa_intf = dev->data->dev_private;
273
274         PMD_INIT_FUNC_TRACE();
275
276         /* Change tx callback to the real one */
277         dev->tx_pkt_burst = dpaa_eth_queue_tx;
278         fman_if_enable_rx(dpaa_intf->fif);
279
280         return 0;
281 }
282
283 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
284 {
285         struct dpaa_if *dpaa_intf = dev->data->dev_private;
286
287         PMD_INIT_FUNC_TRACE();
288
289         fman_if_disable_rx(dpaa_intf->fif);
290         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
291 }
292
293 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
294 {
295         PMD_INIT_FUNC_TRACE();
296
297         dpaa_eth_dev_stop(dev);
298 }
299
300 static int
301 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
302                      char *fw_version,
303                      size_t fw_size)
304 {
305         int ret;
306         FILE *svr_file = NULL;
307         unsigned int svr_ver = 0;
308
309         PMD_INIT_FUNC_TRACE();
310
311         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
312         if (!svr_file) {
313                 DPAA_PMD_ERR("Unable to open SoC device");
314                 return -ENOTSUP; /* Not supported on this infra */
315         }
316         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
317                 dpaa_svr_family = svr_ver & SVR_MASK;
318         else
319                 DPAA_PMD_ERR("Unable to read SoC device");
320
321         fclose(svr_file);
322
323         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
324                        svr_ver, fman_ip_rev);
325         ret += 1; /* add the size of '\0' */
326
327         if (fw_size < (uint32_t)ret)
328                 return ret;
329         else
330                 return 0;
331 }
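/*
 * Editorial note (not part of the original source): per the
 * rte_eth_dev_fw_version_get() contract, a positive return value is the
 * buffer size the caller should retry with. Hypothetical caller:
 *
 *	char fw[64];
 *	int rc = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *
 *	if (rc == 0)
 *		printf("fw: %s\n", fw);
 *	else if (rc > 0)
 *		printf("retry with a %d-byte buffer\n", rc);
 */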
332
333 static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
334                              struct rte_eth_dev_info *dev_info)
335 {
336         struct dpaa_if *dpaa_intf = dev->data->dev_private;
337
338         PMD_INIT_FUNC_TRACE();
339
340         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
341         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
342         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
343         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
344         dev_info->max_hash_mac_addrs = 0;
345         dev_info->max_vfs = 0;
346         dev_info->max_vmdq_pools = ETH_16_POOLS;
347         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
348
349         if (dpaa_intf->fif->mac_type == fman_mac_1g) {
350                 dev_info->speed_capa = ETH_LINK_SPEED_1G;
351         } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
352                 dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
353         } else {
354                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
355                              dpaa_intf->name, dpaa_intf->fif->mac_type);
356                 return -EINVAL;
357         }
358
359         dev_info->rx_offload_capa = dev_rx_offloads_sup |
360                                         dev_rx_offloads_nodis;
361         dev_info->tx_offload_capa = dev_tx_offloads_sup |
362                                         dev_tx_offloads_nodis;
363         dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
364         dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
365
366         return 0;
367 }
368
369 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
370                                 int wait_to_complete __rte_unused)
371 {
372         struct dpaa_if *dpaa_intf = dev->data->dev_private;
373         struct rte_eth_link *link = &dev->data->dev_link;
374
375         PMD_INIT_FUNC_TRACE();
376
377         if (dpaa_intf->fif->mac_type == fman_mac_1g)
378                 link->link_speed = ETH_SPEED_NUM_1G;
379         else if (dpaa_intf->fif->mac_type == fman_mac_10g)
380                 link->link_speed = ETH_SPEED_NUM_10G;
381         else
382                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
383                              dpaa_intf->name, dpaa_intf->fif->mac_type);
384
385         link->link_status = dpaa_intf->valid;
386         link->link_duplex = ETH_LINK_FULL_DUPLEX;
387         link->link_autoneg = ETH_LINK_AUTONEG;
388         return 0;
389 }
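/*
 * Editorial sketch (not part of the original source): reading the link
 * state filled in above. In releases of this era rte_eth_link_get_nowait()
 * returns void; port_id is hypothetical:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	printf("port %u: %s, %u Mbps\n", port_id,
 *	       link.link_status ? "up" : "down", link.link_speed);
 */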
390
391 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
392                                struct rte_eth_stats *stats)
393 {
394         struct dpaa_if *dpaa_intf = dev->data->dev_private;
395
396         PMD_INIT_FUNC_TRACE();
397
398         fman_if_stats_get(dpaa_intf->fif, stats);
399         return 0;
400 }
401
402 static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
403 {
404         struct dpaa_if *dpaa_intf = dev->data->dev_private;
405
406         PMD_INIT_FUNC_TRACE();
407
408         fman_if_stats_reset(dpaa_intf->fif);
409
410         return 0;
411 }
412
413 static int
414 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
415                     unsigned int n)
416 {
417         struct dpaa_if *dpaa_intf = dev->data->dev_private;
418         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
419         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
420
421         if (n < num)
422                 return num;
423
424         if (xstats == NULL)
425                 return 0;
426
427         fman_if_stats_get_all(dpaa_intf->fif, values,
428                               sizeof(struct dpaa_if_stats) / 8);
429
430         for (i = 0; i < num; i++) {
431                 xstats[i].id = i;
432                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
433         }
434         return i;
435 }
436
437 static int
438 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
439                       struct rte_eth_xstat_name *xstats_names,
440                       unsigned int limit)
441 {
442         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
443
444         if (limit < stat_cnt)
445                 return stat_cnt;
446
447         if (xstats_names != NULL)
448                 for (i = 0; i < stat_cnt; i++)
449                         strlcpy(xstats_names[i].name,
450                                 dpaa_xstats_strings[i].name,
451                                 sizeof(xstats_names[i].name));
452
453         return stat_cnt;
454 }
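/*
 * Editorial sketch (not part of the original source): the usual two-pass
 * pattern for reading the xstats exposed above; names and values share the
 * same indices. Hypothetical application code:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	if (n > 0) {
 *		struct rte_eth_xstat_name names[n];
 *		struct rte_eth_xstat vals[n];
 *
 *		rte_eth_xstats_get_names(port_id, names, n);
 *		n = rte_eth_xstats_get(port_id, vals, n);
 *		for (int i = 0; i < n; i++)
 *			printf("%s = %" PRIu64 "\n",
 *			       names[vals[i].id].name, vals[i].value);
 *	}
 */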
455
456 static int
457 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
458                       uint64_t *values, unsigned int n)
459 {
460         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
461         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
462
463         if (!ids) {
464                 struct dpaa_if *dpaa_intf = dev->data->dev_private;
465
466                 if (n < stat_cnt)
467                         return stat_cnt;
468
469                 if (!values)
470                         return 0;
471
472                 fman_if_stats_get_all(dpaa_intf->fif, values_copy,
473                                       sizeof(struct dpaa_if_stats) / 8);
474
475                 for (i = 0; i < stat_cnt; i++)
476                         values[i] =
477                                 values_copy[dpaa_xstats_strings[i].offset / 8];
478
479                 return stat_cnt;
480         }
481
482         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
483
484         for (i = 0; i < n; i++) {
485                 if (ids[i] >= stat_cnt) {
486                         DPAA_PMD_ERR("id value isn't valid");
487                         return -1;
488                 }
489                 values[i] = values_copy[ids[i]];
490         }
491         return n;
492 }
493
494 static int
495 dpaa_xstats_get_names_by_id(
496         struct rte_eth_dev *dev,
497         struct rte_eth_xstat_name *xstats_names,
498         const uint64_t *ids,
499         unsigned int limit)
500 {
501         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
502         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
503
504         if (!ids)
505                 return dpaa_xstats_get_names(dev, xstats_names, limit);
506
507         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
508
509         for (i = 0; i < limit; i++) {
510                 if (ids[i] >= stat_cnt) {
511                         DPAA_PMD_ERR("id value isn't valid");
512                         return -1;
513                 }
514                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
515         }
516         return limit;
517 }
518
519 static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
520 {
521         struct dpaa_if *dpaa_intf = dev->data->dev_private;
522
523         PMD_INIT_FUNC_TRACE();
524
525         fman_if_promiscuous_enable(dpaa_intf->fif);
526
527         return 0;
528 }
529
530 static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
531 {
532         struct dpaa_if *dpaa_intf = dev->data->dev_private;
533
534         PMD_INIT_FUNC_TRACE();
535
536         fman_if_promiscuous_disable(dpaa_intf->fif);
537
538         return 0;
539 }
540
541 static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
542 {
543         struct dpaa_if *dpaa_intf = dev->data->dev_private;
544
545         PMD_INIT_FUNC_TRACE();
546
547         fman_if_set_mcast_filter_table(dpaa_intf->fif);
548
549         return 0;
550 }
551
552 static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
553 {
554         struct dpaa_if *dpaa_intf = dev->data->dev_private;
555
556         PMD_INIT_FUNC_TRACE();
557
558         fman_if_reset_mcast_filter_table(dpaa_intf->fif);
559
560         return 0;
561 }
562
563 static
564 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
565                             uint16_t nb_desc,
566                             unsigned int socket_id __rte_unused,
567                             const struct rte_eth_rxconf *rx_conf __rte_unused,
568                             struct rte_mempool *mp)
569 {
570         struct dpaa_if *dpaa_intf = dev->data->dev_private;
571         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
572         struct qm_mcc_initfq opts = {0};
573         u32 flags = 0;
574         int ret;
575         u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
576
577         PMD_INIT_FUNC_TRACE();
578
579         if (queue_idx >= dev->data->nb_rx_queues) {
580                 rte_errno = EOVERFLOW;
581                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
582                       (void *)dev, queue_idx, dev->data->nb_rx_queues);
583                 return -rte_errno;
584         }
585
586         DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
587                         queue_idx, rxq->fqid);
588
589         /* Max packet can fit in a single buffer */
590         if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
591                 ;
592         } else if (dev->data->dev_conf.rxmode.offloads &
593                         DEV_RX_OFFLOAD_SCATTER) {
594                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
595                         buffsz * DPAA_SGT_MAX_ENTRIES) {
596                         DPAA_PMD_ERR("max RxPkt size %d too big to fit "
597                                 "MaxSGlist %d",
598                                 dev->data->dev_conf.rxmode.max_rx_pkt_len,
599                                 buffsz * DPAA_SGT_MAX_ENTRIES);
600                         rte_errno = EOVERFLOW;
601                         return -rte_errno;
602                 }
603         } else {
604                 DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
605                      " larger than a single mbuf (%u) and scattered"
606                      " mode has not been requested",
607                      dev->data->dev_conf.rxmode.max_rx_pkt_len,
608                      buffsz);
609         }
610
611         if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
612                 struct fman_if_ic_params icp;
613                 uint32_t fd_offset;
614                 uint32_t bp_size;
615
616                 if (!mp->pool_data) {
617                         DPAA_PMD_ERR("Not an offloaded buffer pool!");
618                         return -1;
619                 }
620                 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
621
622                 memset(&icp, 0, sizeof(icp));
623                 /* set ICEOF to the default value, which is 0 */
624                 icp.iciof = DEFAULT_ICIOF;
625                 icp.iceof = DEFAULT_RX_ICEOF;
626                 icp.icsz = DEFAULT_ICSZ;
627                 fman_if_set_ic_params(dpaa_intf->fif, &icp);
628
629                 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
630                 fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
631
632                 /* Buffer pool size should be equal to the dataroom size */
633                 bp_size = rte_pktmbuf_data_room_size(mp);
634                 fman_if_set_bp(dpaa_intf->fif, mp->size,
635                                dpaa_intf->bp_info->bpid, bp_size);
636                 dpaa_intf->valid = 1;
637                 DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
638                                 dpaa_intf->name, fd_offset,
639                                 fman_if_get_fdoff(dpaa_intf->fif));
640         }
641         DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm = %d", dpaa_intf->name,
642                 fman_if_get_sg_enable(dpaa_intf->fif),
643                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
644         /* Check whether this queue can use push mode; errors not handled for now */
645         if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
646                 struct qman_portal *qp;
647
648                 dpaa_push_queue_idx++;
649                 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
650                 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
651                                    QM_FQCTRL_CTXASTASHING |
652                                    QM_FQCTRL_PREFERINCACHE;
653                 opts.fqd.context_a.stashing.exclusive = 0;
654                 /* In multicore scenarios stashing becomes a bottleneck on
655                  * LS1046, so do not enable stashing in this case.
656                  */
657                 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
658                         opts.fqd.context_a.stashing.annotation_cl =
659                                                 DPAA_IF_RX_ANNOTATION_STASH;
660                 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
661                 opts.fqd.context_a.stashing.context_cl =
662                                                 DPAA_IF_RX_CONTEXT_STASH;
663
664                 /* Create a channel and associate the given queue with it */
665                 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
666                 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
667                 opts.fqd.dest.channel = rxq->ch_id;
668                 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
669                 flags = QMAN_INITFQ_FLAG_SCHED;
670
671                 /* Configure tail drop */
672                 if (dpaa_intf->cgr_rx) {
673                         opts.we_mask |= QM_INITFQ_WE_CGID;
674                         opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
675                         opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
676                 }
677                 ret = qman_init_fq(rxq, flags, &opts);
678                 if (ret) {
679                         DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
680                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
681                         return ret;
682                 }
683                 if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
684                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
685                 } else {
686                         rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
687                         rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
688                 }
689
690                 rxq->is_static = true;
691
692                 /* Allocate qman specific portals */
693                 qp = fsl_qman_fq_portal_create();
694                 if (!qp) {
695                         DPAA_PMD_ERR("Unable to alloc fq portal");
696                         return -1;
697                 }
698                 rxq->qp = qp;
699         }
700         rxq->bp_array = rte_dpaa_bpid_info;
701         dev->data->rx_queues[queue_idx] = rxq;
702
703         /* configure the CGR size as per the desc size */
704         if (dpaa_intf->cgr_rx) {
705                 struct qm_mcc_initcgr cgr_opts = {0};
706
707                 /* Enable tail drop with cgr on this queue */
708                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
709                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
710                 if (ret) {
711                         DPAA_PMD_WARN(
712                                 "rx taildrop modify fail on fqid %d (ret=%d)",
713                                 rxq->fqid, ret);
714                 }
715         }
716
717         return 0;
718 }
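/*
 * Editorial sketch (not part of the original source): sizing the mempool so
 * the single-buffer check above passes. The pool must be backed by the DPAA
 * mempool driver (ops name "dpaa", e.g. via testpmd's --mbuf-pool-ops-name),
 * or the "Not an offloaded buffer pool" error above fires. Pool name,
 * counts and the 2048-byte dataroom are illustrative:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *			8192, 256, 0,
 *			2048 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
 *
 *	// with max_rx_pkt_len <= 2048, no scatter is required
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512,
 *				   rte_socket_id(), NULL, mp) < 0)
 *		return -1;
 */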
719
720 int
721 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
722                 int eth_rx_queue_id,
723                 u16 ch_id,
724                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
725 {
726         int ret;
727         u32 flags = 0;
728         struct dpaa_if *dpaa_intf = dev->data->dev_private;
729         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
730         struct qm_mcc_initfq opts = {0};
731
732         if (dpaa_push_mode_max_queue)
733                 DPAA_PMD_WARN("PUSH-mode queues and EVENTDEV are not compatible\n"
734                               "PUSH mode already enabled for the first %d queues.\n"
735                               "To disable, set DPAA_PUSH_QUEUES_NUMBER to 0\n",
736                               dpaa_push_mode_max_queue);
737
738         dpaa_poll_queue_default_config(&opts);
739
740         switch (queue_conf->ev.sched_type) {
741         case RTE_SCHED_TYPE_ATOMIC:
742                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
743                 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
744                  * configuration with HOLD_ACTIVE setting
745                  */
746                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
747                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
748                 break;
749         case RTE_SCHED_TYPE_ORDERED:
750                 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
751                 return -1;
752         default:
753                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
754                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
755                 break;
756         }
757
758         opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
759         opts.fqd.dest.channel = ch_id;
760         opts.fqd.dest.wq = queue_conf->ev.priority;
761
762         if (dpaa_intf->cgr_rx) {
763                 opts.we_mask |= QM_INITFQ_WE_CGID;
764                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
765                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
766         }
767
768         flags = QMAN_INITFQ_FLAG_SCHED;
769
770         ret = qman_init_fq(rxq, flags, &opts);
771         if (ret) {
772                 DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
773                                 "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
774                 return ret;
775         }
776
777         /* copy configuration which needs to be filled during dequeue */
778         memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
779         dev->data->rx_queues[eth_rx_queue_id] = rxq;
780
781         return ret;
782 }
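/*
 * Editorial sketch (not part of the original source): this hook is driven
 * by the event eth Rx adapter. A hypothetical queue-add using atomic
 * scheduling, which takes the HOLDACTIVE branch above (ordered mode is
 * rejected by this driver); adapter id, queue ids and priority are
 * illustrative:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
 *
 *	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.ev.queue_id = 0;
 *	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	if (rte_event_eth_rx_adapter_queue_add(0, port_id, 0, &qconf) < 0)
 *		return -1;
 */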
783
784 int
785 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
786                 int eth_rx_queue_id)
787 {
788         struct qm_mcc_initfq opts;
789         int ret;
790         u32 flags = 0;
791         struct dpaa_if *dpaa_intf = dev->data->dev_private;
792         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
793
794         dpaa_poll_queue_default_config(&opts);
795
796         if (dpaa_intf->cgr_rx) {
797                 opts.we_mask |= QM_INITFQ_WE_CGID;
798                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
799                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
800         }
801
802         ret = qman_init_fq(rxq, flags, &opts);
803         if (ret) {
804                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
805                              rxq->fqid, ret);
806         }
807
808         rxq->cb.dqrr_dpdk_cb = NULL;
809         dev->data->rx_queues[eth_rx_queue_id] = NULL;
810
811         return 0;
812 }
813
814 static
815 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
816 {
817         PMD_INIT_FUNC_TRACE();
818 }
819
820 static
821 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
822                             uint16_t nb_desc __rte_unused,
823                 unsigned int socket_id __rte_unused,
824                 const struct rte_eth_txconf *tx_conf __rte_unused)
825 {
826         struct dpaa_if *dpaa_intf = dev->data->dev_private;
827
828         PMD_INIT_FUNC_TRACE();
829
830         if (queue_idx >= dev->data->nb_tx_queues) {
831                 rte_errno = EOVERFLOW;
832                 DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
833                       (void *)dev, queue_idx, dev->data->nb_tx_queues);
834                 return -rte_errno;
835         }
836
837         DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
838                         queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
839         dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
840         return 0;
841 }
842
843 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
844 {
845         PMD_INIT_FUNC_TRACE();
846 }
847
848 static uint32_t
849 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
850 {
851         struct dpaa_if *dpaa_intf = dev->data->dev_private;
852         struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
853         u32 frm_cnt = 0;
854
855         PMD_INIT_FUNC_TRACE();
856
857         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
858                 RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
859                         rx_queue_id, frm_cnt);
860         }
861         return frm_cnt;
862 }
863
864 static int dpaa_link_down(struct rte_eth_dev *dev)
865 {
866         PMD_INIT_FUNC_TRACE();
867
868         dpaa_eth_dev_stop(dev);
869         return 0;
870 }
871
872 static int dpaa_link_up(struct rte_eth_dev *dev)
873 {
874         PMD_INIT_FUNC_TRACE();
875
876         dpaa_eth_dev_start(dev);
877         return 0;
878 }
879
880 static int
881 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
882                    struct rte_eth_fc_conf *fc_conf)
883 {
884         struct dpaa_if *dpaa_intf = dev->data->dev_private;
885         struct rte_eth_fc_conf *net_fc;
886
887         PMD_INIT_FUNC_TRACE();
888
889         if (!(dpaa_intf->fc_conf)) {
890                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
891                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
892                 if (!dpaa_intf->fc_conf) {
893                         DPAA_PMD_ERR("unable to save flow control info");
894                         return -ENOMEM;
895                 }
896         }
897         net_fc = dpaa_intf->fc_conf;
898
899         if (fc_conf->high_water < fc_conf->low_water) {
900                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
901                 return -EINVAL;
902         }
903
904         if (fc_conf->mode == RTE_FC_NONE) {
905                 return 0;
906         } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
907                  fc_conf->mode == RTE_FC_FULL) {
908                 fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
909                                          fc_conf->low_water,
910                                 dpaa_intf->bp_info->bpid);
911                 if (fc_conf->pause_time)
912                         fman_if_set_fc_quanta(dpaa_intf->fif,
913                                               fc_conf->pause_time);
914         }
915
916         /* Save the information in dpaa device */
917         net_fc->pause_time = fc_conf->pause_time;
918         net_fc->high_water = fc_conf->high_water;
919         net_fc->low_water = fc_conf->low_water;
920         net_fc->send_xon = fc_conf->send_xon;
921         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
922         net_fc->mode = fc_conf->mode;
923         net_fc->autoneg = fc_conf->autoneg;
924
925         return 0;
926 }
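/*
 * Editorial sketch (not part of the original source): driving the handler
 * above through the generic ethdev API. The thresholds end up in
 * fman_if_set_fc_threshold(); the numbers here are illustrative only:
 *
 *	struct rte_eth_fc_conf fc = {0};
 *
 *	fc.mode = RTE_FC_TX_PAUSE;	// or RTE_FC_FULL
 *	fc.high_water = 1024;
 *	fc.low_water = 512;		// must not exceed high_water
 *	fc.pause_time = 0x680;
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc) < 0)
 *		return -1;
 */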
927
928 static int
929 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
930                    struct rte_eth_fc_conf *fc_conf)
931 {
932         struct dpaa_if *dpaa_intf = dev->data->dev_private;
933         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
934         int ret;
935
936         PMD_INIT_FUNC_TRACE();
937
938         if (net_fc) {
939                 fc_conf->pause_time = net_fc->pause_time;
940                 fc_conf->high_water = net_fc->high_water;
941                 fc_conf->low_water = net_fc->low_water;
942                 fc_conf->send_xon = net_fc->send_xon;
943                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
944                 fc_conf->mode = net_fc->mode;
945                 fc_conf->autoneg = net_fc->autoneg;
946                 return 0;
947         }
948         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
949         if (ret) {
950                 fc_conf->mode = RTE_FC_TX_PAUSE;
951                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
952         } else {
953                 fc_conf->mode = RTE_FC_NONE;
954         }
955
956         return 0;
957 }
958
959 static int
960 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
961                              struct rte_ether_addr *addr,
962                              uint32_t index,
963                              __rte_unused uint32_t pool)
964 {
965         int ret;
966         struct dpaa_if *dpaa_intf = dev->data->dev_private;
967
968         PMD_INIT_FUNC_TRACE();
969
970         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
971
972         if (ret)
973                 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
974                         " err = %d", ret);
975         return 0;
976 }
977
978 static void
979 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
980                           uint32_t index)
981 {
982         struct dpaa_if *dpaa_intf = dev->data->dev_private;
983
984         PMD_INIT_FUNC_TRACE();
985
986         fman_if_clear_mac_addr(dpaa_intf->fif, index);
987 }
988
989 static int
990 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
991                        struct rte_ether_addr *addr)
992 {
993         int ret;
994         struct dpaa_if *dpaa_intf = dev->data->dev_private;
995
996         PMD_INIT_FUNC_TRACE();
997
998         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
999         if (ret)
1000                 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
1001
1002         return ret;
1003 }
1004
1005 static struct eth_dev_ops dpaa_devops = {
1006         .dev_configure            = dpaa_eth_dev_configure,
1007         .dev_start                = dpaa_eth_dev_start,
1008         .dev_stop                 = dpaa_eth_dev_stop,
1009         .dev_close                = dpaa_eth_dev_close,
1010         .dev_infos_get            = dpaa_eth_dev_info,
1011         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
1012
1013         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
1014         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
1015         .rx_queue_release         = dpaa_eth_rx_queue_release,
1016         .tx_queue_release         = dpaa_eth_tx_queue_release,
1017         .rx_queue_count           = dpaa_dev_rx_queue_count,
1018
1019         .flow_ctrl_get            = dpaa_flow_ctrl_get,
1020         .flow_ctrl_set            = dpaa_flow_ctrl_set,
1021
1022         .link_update              = dpaa_eth_link_update,
1023         .stats_get                = dpaa_eth_stats_get,
1024         .xstats_get               = dpaa_dev_xstats_get,
1025         .xstats_get_by_id         = dpaa_xstats_get_by_id,
1026         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
1027         .xstats_get_names         = dpaa_xstats_get_names,
1028         .xstats_reset             = dpaa_eth_stats_reset,
1029         .stats_reset              = dpaa_eth_stats_reset,
1030         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
1031         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
1032         .allmulticast_enable      = dpaa_eth_multicast_enable,
1033         .allmulticast_disable     = dpaa_eth_multicast_disable,
1034         .mtu_set                  = dpaa_mtu_set,
1035         .dev_set_link_down        = dpaa_link_down,
1036         .dev_set_link_up          = dpaa_link_up,
1037         .mac_addr_add             = dpaa_dev_add_mac_addr,
1038         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
1039         .mac_addr_set             = dpaa_dev_set_mac_addr,
1040
1041         .fw_version_get           = dpaa_fw_version_get,
1042 };
1043
1044 static bool
1045 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
1046 {
1047         if (strcmp(dev->device->driver->name,
1048                    drv->driver.name))
1049                 return false;
1050
1051         return true;
1052 }
1053
1054 static bool
1055 is_dpaa_supported(struct rte_eth_dev *dev)
1056 {
1057         return is_device_supported(dev, &rte_dpaa_pmd);
1058 }
1059
1060 int
1061 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
1062 {
1063         struct rte_eth_dev *dev;
1064         struct dpaa_if *dpaa_intf;
1065
1066         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
1067
1068         dev = &rte_eth_devices[port];
1069
1070         if (!is_dpaa_supported(dev))
1071                 return -ENOTSUP;
1072
1073         dpaa_intf = dev->data->dev_private;
1074
1075         if (on)
1076                 fman_if_loopback_enable(dpaa_intf->fif);
1077         else
1078                 fman_if_loopback_disable(dpaa_intf->fif);
1079
1080         return 0;
1081 }
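/*
 * Editorial note (not part of the original source): this is a DPAA-specific
 * API from rte_pmd_dpaa.h, useful for self-test setups. Hypothetical usage:
 *
 *	if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) == 0) {
 *		// transmit; frames are looped back into the Rx path
 *		rte_pmd_dpaa_set_tx_loopback(port_id, 0);
 *	}
 */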
1082
1083 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
1084 {
1085         struct rte_eth_fc_conf *fc_conf;
1086         int ret;
1087
1088         PMD_INIT_FUNC_TRACE();
1089
1090         if (!(dpaa_intf->fc_conf)) {
1091                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
1092                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
1093                 if (!dpaa_intf->fc_conf) {
1094                         DPAA_PMD_ERR("unable to save flow control info");
1095                         return -ENOMEM;
1096                 }
1097         }
1098         fc_conf = dpaa_intf->fc_conf;
1099         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
1100         if (ret) {
1101                 fc_conf->mode = RTE_FC_TX_PAUSE;
1102                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
1103         } else {
1104                 fc_conf->mode = RTE_FC_NONE;
1105         }
1106
1107         return 0;
1108 }
1109
1110 /* Initialise an Rx FQ */
1111 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1112                               uint32_t fqid)
1113 {
1114         struct qm_mcc_initfq opts = {0};
1115         int ret;
1116         u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
1117         struct qm_mcc_initcgr cgr_opts = {
1118                 .we_mask = QM_CGR_WE_CS_THRES |
1119                                 QM_CGR_WE_CSTD_EN |
1120                                 QM_CGR_WE_MODE,
1121                 .cgr = {
1122                         .cstd_en = QM_CGR_EN,
1123                         .mode = QMAN_CGR_MODE_FRAME
1124                 }
1125         };
1126
1127         PMD_INIT_FUNC_TRACE();
1128
1129         if (fqid) {
1130                 ret = qman_reserve_fqid(fqid);
1131                 if (ret) {
1132                         DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
1133                                      fqid, ret);
1134                         return -EINVAL;
1135                 }
1136         } else {
1137                 flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1138         }
1139         DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
1140         ret = qman_create_fq(fqid, flags, fq);
1141         if (ret) {
1142                 DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
1143                         fqid, ret);
1144                 return ret;
1145         }
1146         fq->is_static = false;
1147
1148         dpaa_poll_queue_default_config(&opts);
1149
1150         if (cgr_rx) {
1151                 /* Enable tail drop with cgr on this queue */
1152                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1153                 cgr_rx->cb = NULL;
1154                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1155                                       &cgr_opts);
1156                 if (ret) {
1157                         DPAA_PMD_WARN(
1158                                 "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
1159                                 fq->fqid, ret);
1160                         goto without_cgr;
1161                 }
1162                 opts.we_mask |= QM_INITFQ_WE_CGID;
1163                 opts.fqd.cgid = cgr_rx->cgrid;
1164                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1165         }
1166 without_cgr:
1167         ret = qman_init_fq(fq, 0, &opts);
1168         if (ret)
1169                 DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
1170         return ret;
1171 }
1172
1173 /* Initialise a Tx FQ */
1174 static int dpaa_tx_queue_init(struct qman_fq *fq,
1175                               struct fman_if *fman_intf)
1176 {
1177         struct qm_mcc_initfq opts = {0};
1178         int ret;
1179
1180         PMD_INIT_FUNC_TRACE();
1181
1182         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1183                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1184         if (ret) {
1185                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1186                 return ret;
1187         }
1188         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1189                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1190         opts.fqd.dest.channel = fman_intf->tx_channel_id;
1191         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1192         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1193         opts.fqd.context_b = 0;
1194         /* no tx-confirmation */
1195         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1196         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1197         DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
1198         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1199         if (ret)
1200                 DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
1201         return ret;
1202 }
1203
1204 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1205 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1206 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1207 {
1208         struct qm_mcc_initfq opts = {0};
1209         int ret;
1210
1211         PMD_INIT_FUNC_TRACE();
1212
1213         ret = qman_reserve_fqid(fqid);
1214         if (ret) {
1215                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1216                         fqid, ret);
1217                 return -EINVAL;
1218         }
1219         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
1220         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1221         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1222         if (ret) {
1223                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1224                         fqid, ret);
1225                 return ret;
1226         }
1227         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1228         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1229         ret = qman_init_fq(fq, 0, &opts);
1230         if (ret)
1231                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1232                             fqid, ret);
1233         return ret;
1234 }
1235 #endif
1236
1237 /* Initialise a network interface */
1238 static int
1239 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1240 {
1241         int num_rx_fqs, fqid;
1242         int loop, ret = 0;
1243         int dev_id;
1244         struct rte_dpaa_device *dpaa_device;
1245         struct dpaa_if *dpaa_intf;
1246         struct fm_eth_port_cfg *cfg;
1247         struct fman_if *fman_intf;
1248         struct fman_if_bpool *bp, *tmp_bp;
1249         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1250
1251         PMD_INIT_FUNC_TRACE();
1252
1253         dpaa_intf = eth_dev->data->dev_private;
1254         /* For secondary processes, the primary has done all the work */
1255         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1256                 eth_dev->dev_ops = &dpaa_devops;
1257                 /* Plugging of UCODE burst API not supported in Secondary */
1258                 eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1259                 eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
1260 #ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1261                 qman_set_fq_lookup_table(
1262                                 dpaa_intf->rx_queues->qman_fq_lookup_table);
1263 #endif
1264                 return 0;
1265         }
1266
1267         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1268         dev_id = dpaa_device->id.dev_id;
1269         dpaa_intf = eth_dev->data->dev_private;
1270         cfg = &dpaa_netcfg->port_cfg[dev_id];
1271         fman_intf = cfg->fman_if;
1272
1273         dpaa_intf->name = dpaa_device->name;
1274
1275         /* save fman_if & cfg in the interface structure */
1276         dpaa_intf->fif = fman_intf;
1277         dpaa_intf->ifid = dev_id;
1278         dpaa_intf->cfg = cfg;
1279
1280         /* Initialize Rx FQ's */
1281         if (default_q) {
1282                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1283         } else {
1284                 if (getenv("DPAA_NUM_RX_QUEUES"))
1285                         num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
1286                 else
1287                         num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1288         }
1289
1290
1291         /* A device cannot have more than DPAA_MAX_NUM_PCD_QUEUES Rx
1292          * queues.
1293          */
1294         if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1295                 DPAA_PMD_ERR("Invalid number of RX queues\n");
1296                 return -EINVAL;
1297         }
1298
1299         dpaa_intf->rx_queues = rte_zmalloc(NULL,
1300                 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1301         if (!dpaa_intf->rx_queues) {
1302                 DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1303                 return -ENOMEM;
1304         }
1305
1306         /* If congestion control is enabled globally*/
1307         if (td_threshold) {
1308                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1309                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1310                 if (!dpaa_intf->cgr_rx) {
1311                         DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1312                         ret = -ENOMEM;
1313                         goto free_rx;
1314                 }
1315
1316                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1317                 if (ret != num_rx_fqs) {
1318                         DPAA_PMD_WARN("insufficient CGRIDs available");
1319                         ret = -EINVAL;
1320                         goto free_rx;
1321                 }
1322         } else {
1323                 dpaa_intf->cgr_rx = NULL;
1324         }
1325
1326         for (loop = 0; loop < num_rx_fqs; loop++) {
1327                 if (default_q)
1328                         fqid = cfg->rx_def;
1329                 else
1330                         fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
1331                                 DPAA_PCD_FQID_MULTIPLIER + loop;
1332
1333                 if (dpaa_intf->cgr_rx)
1334                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1335
1336                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1337                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1338                         fqid);
1339                 if (ret)
1340                         goto free_rx;
1341                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
1342         }
1343         dpaa_intf->nb_rx_queues = num_rx_fqs;
1344
1345         /* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
1346         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
1347                 MAX_DPAA_CORES, MAX_CACHELINE);
1348         if (!dpaa_intf->tx_queues) {
1349                 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
1350                 ret = -ENOMEM;
1351                 goto free_rx;
1352         }
1353
1354         for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
1355                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
1356                                          fman_intf);
1357                 if (ret)
1358                         goto free_tx;
1359                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
1360         }
1361         dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
1362
1363 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1364         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1365                 DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
1366         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
1367         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1368                 DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
1369         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
1370 #endif
1371
1372         DPAA_PMD_DEBUG("All frame queues created");
1373
1374         /* Get the initial configuration for flow control */
1375         dpaa_fc_set_default(dpaa_intf);
1376
1377         /* reset bpool list, initialize bpool dynamically */
1378         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
1379                 list_del(&bp->node);
1380                 rte_free(bp);
1381         }
1382
1383         /* Populate ethdev structure */
1384         eth_dev->dev_ops = &dpaa_devops;
1385         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1386         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
1387
1388         /* Allocate memory for storing MAC addresses */
1389         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
1390                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
1391         if (eth_dev->data->mac_addrs == NULL) {
1392                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
1393                                                 "store MAC addresses",
1394                                 RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
1395                 ret = -ENOMEM;
1396                 goto free_tx;
1397         }
1398
1399         /* copy the primary mac address */
1400         rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
1401
1402         RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1403                 dpaa_device->name,
1404                 fman_intf->mac_addr.addr_bytes[0],
1405                 fman_intf->mac_addr.addr_bytes[1],
1406                 fman_intf->mac_addr.addr_bytes[2],
1407                 fman_intf->mac_addr.addr_bytes[3],
1408                 fman_intf->mac_addr.addr_bytes[4],
1409                 fman_intf->mac_addr.addr_bytes[5]);
1410
1411         /* Disable RX mode */
1412         fman_if_discard_rx_errors(fman_intf);
1413         fman_if_disable_rx(fman_intf);
1414         /* Disable promiscuous mode */
1415         fman_if_promiscuous_disable(fman_intf);
1416         /* Disable multicast */
1417         fman_if_reset_mcast_filter_table(fman_intf);
1418         /* Reset interface statistics */
1419         fman_if_stats_reset(fman_intf);
1420         /* Disable SG by default */
1421         fman_if_set_sg(fman_intf, 0);
1422         fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
1423
1424         return 0;
1425
1426 free_tx:
1427         rte_free(dpaa_intf->tx_queues);
1428         dpaa_intf->tx_queues = NULL;
1429         dpaa_intf->nb_tx_queues = 0;
1430
1431 free_rx:
1432         rte_free(dpaa_intf->cgr_rx);
1433         rte_free(dpaa_intf->rx_queues);
1434         dpaa_intf->rx_queues = NULL;
1435         dpaa_intf->nb_rx_queues = 0;
1436         return ret;
1437 }
1438
1439 static int
1440 dpaa_dev_uninit(struct rte_eth_dev *dev)
1441 {
1442         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1443         int loop;
1444
1445         PMD_INIT_FUNC_TRACE();
1446
1447         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1448                 return -EPERM;
1449
1450         if (!dpaa_intf) {
1451                 DPAA_PMD_WARN("Already closed or not started");
1452                 return -1;
1453         }
1454
1455         dpaa_eth_dev_close(dev);
1456
1457         /* release configuration memory */
1458         if (dpaa_intf->fc_conf)
1459                 rte_free(dpaa_intf->fc_conf);
1460
1461         /* Release RX congestion Groups */
1462         if (dpaa_intf->cgr_rx) {
1463                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
1464                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
1465
1466                 qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
1467                                          dpaa_intf->nb_rx_queues);
1468         }
1469
1470         rte_free(dpaa_intf->cgr_rx);
1471         dpaa_intf->cgr_rx = NULL;
1472
1473         rte_free(dpaa_intf->rx_queues);
1474         dpaa_intf->rx_queues = NULL;
1475
1476         rte_free(dpaa_intf->tx_queues);
1477         dpaa_intf->tx_queues = NULL;
1478
1479         dev->dev_ops = NULL;
1480         dev->rx_pkt_burst = NULL;
1481         dev->tx_pkt_burst = NULL;
1482
1483         return 0;
1484 }
1485
1486 static int
1487 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
1488                struct rte_dpaa_device *dpaa_dev)
1489 {
1490         int diag;
1491         int ret;
1492         struct rte_eth_dev *eth_dev;
1493
1494         PMD_INIT_FUNC_TRACE();
1495
1496         if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
1497                 RTE_PKTMBUF_HEADROOM) {
1498                 DPAA_PMD_ERR(
1499                 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
1500                 RTE_PKTMBUF_HEADROOM,
1501                 DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
1502
1503                 return -1;
1504         }
1505
1506         /* In case of secondary process, the device is already configured
1507          * and no further action is required, except portal initialization
1508          * and verifying secondary attachment to port name.
1509          */
1510         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1511                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
1512                 if (!eth_dev)
1513                         return -ENOMEM;
1514                 eth_dev->device = &dpaa_dev->device;
1515                 eth_dev->dev_ops = &dpaa_devops;
1516                 rte_eth_dev_probing_finish(eth_dev);
1517                 return 0;
1518         }
1519
1520         if (!is_global_init) {
1521                 /* One time load of Qman/Bman drivers */
1522                 ret = qman_global_init();
1523                 if (ret) {
1524                         DPAA_PMD_ERR("QMAN initialization failed: %d",
1525                                      ret);
1526                         return ret;
1527                 }
1528                 ret = bman_global_init();
1529                 if (ret) {
1530                         DPAA_PMD_ERR("BMAN initialization failed: %d",
1531                                      ret);
1532                         return ret;
1533                 }
1534
1535                 if (access("/tmp/fmc.bin", F_OK) == -1) {
1536                         RTE_LOG(INFO, PMD,
1537                                 "* FMC not configured. Enabling default mode\n");
1538                         default_q = 1;
1539                 }
1540
1541                 /* disabling the default push mode for LS1043 */
1542                 if (dpaa_svr_family == SVR_LS1043A_FAMILY)
1543                         dpaa_push_mode_max_queue = 0;
1544
1545                 /* If push-mode queues are to be enabled. Currently we
1546                  * allow only one queue per thread.
1547                  */
1548                 if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
1549                         dpaa_push_mode_max_queue =
1550                                         atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
1551                         if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
1552                             dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
1553                 }
1554
1555                 is_global_init = 1;
1556         }
1557
1558         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1559                 ret = rte_dpaa_portal_init((void *)1);
1560                 if (ret) {
1561                         DPAA_PMD_ERR("Unable to initialize portal");
1562                         return ret;
1563                 }
1564         }
1565
1566         /* Secondary processes have already returned above, so only the
1567          * primary process reaches this point.
1568          */
1569         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
1570         if (eth_dev == NULL)
1571                 return -ENOMEM;
1572
1573         eth_dev->data->dev_private = rte_zmalloc(
1574                                         "ethdev private structure",
1575                                         sizeof(struct dpaa_if),
1576                                         RTE_CACHE_LINE_SIZE);
1577         if (!eth_dev->data->dev_private) {
1578                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
1579                 rte_eth_dev_release_port(eth_dev);
1580                 return -ENOMEM;
1581         }
1589         eth_dev->device = &dpaa_dev->device;
1590         dpaa_dev->eth_dev = eth_dev;
1591
1592         /* Invoke PMD device initialization function */
1593         diag = dpaa_dev_init(eth_dev);
1594         if (diag == 0) {
1595                 rte_eth_dev_probing_finish(eth_dev);
1596                 return 0;
1597         }
1598
1599         rte_eth_dev_release_port(eth_dev);
1600         return diag;
1601 }
1602
1603 static int
1604 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
1605 {
1606         struct rte_eth_dev *eth_dev;
1607
1608         PMD_INIT_FUNC_TRACE();
1609
1610         eth_dev = dpaa_dev->eth_dev;
1611         dpaa_dev_uninit(eth_dev);
1612
1613         rte_eth_dev_release_port(eth_dev);
1614
1615         return 0;
1616 }
1617
1618 static struct rte_dpaa_driver rte_dpaa_pmd = {
1619         .drv_type = FSL_DPAA_ETH,
1620         .probe = rte_dpaa_probe,
1621         .remove = rte_dpaa_remove,
1622 };
1623
1624 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);