net/dpaa: fix the ethdev offload checks
[dpdk.git] / drivers / net / dpaa / dpaa_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 /* System headers */
8 #include <stdio.h>
9 #include <inttypes.h>
10 #include <unistd.h>
11 #include <limits.h>
12 #include <sched.h>
13 #include <signal.h>
14 #include <pthread.h>
15 #include <sys/types.h>
16 #include <sys/syscall.h>
17
18 #include <rte_byteorder.h>
19 #include <rte_common.h>
20 #include <rte_interrupts.h>
21 #include <rte_log.h>
22 #include <rte_debug.h>
23 #include <rte_pci.h>
24 #include <rte_atomic.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_memory.h>
27 #include <rte_tailq.h>
28 #include <rte_eal.h>
29 #include <rte_alarm.h>
30 #include <rte_ether.h>
31 #include <rte_ethdev_driver.h>
32 #include <rte_malloc.h>
33 #include <rte_ring.h>
34
35 #include <rte_dpaa_bus.h>
36 #include <rte_dpaa_logs.h>
37 #include <dpaa_mempool.h>
38
39 #include <dpaa_ethdev.h>
40 #include <dpaa_rxtx.h>
41 #include <rte_pmd_dpaa.h>
42
43 #include <fsl_usd.h>
44 #include <fsl_qman.h>
45 #include <fsl_bman.h>
46 #include <fsl_fman.h>
47
48 /* Supported Rx offloads */
49 static uint64_t dev_rx_offloads_sup =
50                 DEV_RX_OFFLOAD_JUMBO_FRAME;
51
52 /* Rx offloads which cannot be disabled */
53 static uint64_t dev_rx_offloads_nodis =
54                 DEV_RX_OFFLOAD_IPV4_CKSUM |
55                 DEV_RX_OFFLOAD_UDP_CKSUM |
56                 DEV_RX_OFFLOAD_TCP_CKSUM |
57                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
58                 DEV_RX_OFFLOAD_CRC_STRIP |
59                 DEV_RX_OFFLOAD_SCATTER;
60
61 /* Supported Tx offloads */
62 static uint64_t dev_tx_offloads_sup;
63
64 /* Tx offloads which cannot be disabled */
65 static uint64_t dev_tx_offloads_nodis =
66                 DEV_TX_OFFLOAD_IPV4_CKSUM |
67                 DEV_TX_OFFLOAD_UDP_CKSUM |
68                 DEV_TX_OFFLOAD_TCP_CKSUM |
69                 DEV_TX_OFFLOAD_SCTP_CKSUM |
70                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
71                 DEV_TX_OFFLOAD_MULTI_SEGS |
72                 DEV_TX_OFFLOAD_MT_LOCKFREE |
73                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
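/* The "sup"/"nodis" sets above drive dpaa_eth_dev_configure(): offload bits
 * outside (sup | nodis) are rejected with -ENOTSUP, while "nodis" bits are
 * always left enabled in hardware and only produce a warning when an
 * application does not request them.  Both sets are advertised together as
 * the port capabilities in dpaa_eth_dev_info().
 *
 * A minimal application-side sketch (port_id/nb_queues are placeholders, not
 * part of this driver):
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;   // from the "sup" set
 *   // Omitting e.g. DEV_RX_OFFLOAD_CRC_STRIP only triggers a warning here,
 *   // since the "nodis" offloads stay enabled in hardware anyway.
 *   rte_eth_dev_configure(port_id, nb_queues, nb_queues, &conf);
 */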
74
75 /* Keep track of whether QMAN and BMAN have been globally initialized */
76 static int is_global_init;
77 /* At present we only allow up to 4 push mode queues - as each of these queues
78  * needs a dedicated portal and we are short of portals.
79  */
80 #define DPAA_MAX_PUSH_MODE_QUEUE       4
81
82 static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
83 static int dpaa_push_queue_idx; /* Number of queues currently in push mode */
84
85
86 /* Per FQ Taildrop in frame count */
87 static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
88
89 struct rte_dpaa_xstats_name_off {
90         char name[RTE_ETH_XSTATS_NAME_SIZE];
91         uint32_t offset;
92 };
93
94 static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
95         {"rx_align_err",
96                 offsetof(struct dpaa_if_stats, raln)},
97         {"rx_valid_pause",
98                 offsetof(struct dpaa_if_stats, rxpf)},
99         {"rx_fcs_err",
100                 offsetof(struct dpaa_if_stats, rfcs)},
101         {"rx_vlan_frame",
102                 offsetof(struct dpaa_if_stats, rvlan)},
103         {"rx_frame_err",
104                 offsetof(struct dpaa_if_stats, rerr)},
105         {"rx_drop_err",
106                 offsetof(struct dpaa_if_stats, rdrp)},
107         {"rx_undersized",
108                 offsetof(struct dpaa_if_stats, rund)},
109         {"rx_oversize_err",
110                 offsetof(struct dpaa_if_stats, rovr)},
111         {"rx_fragment_pkt",
112                 offsetof(struct dpaa_if_stats, rfrg)},
113         {"tx_valid_pause",
114                 offsetof(struct dpaa_if_stats, txpf)},
115         {"tx_fcs_err",
116                 offsetof(struct dpaa_if_stats, terr)},
117         {"tx_vlan_frame",
118                 offsetof(struct dpaa_if_stats, tvlan)},
119         {"tx_undersized",
120                 offsetof(struct dpaa_if_stats, tund)},
121 };
122
123 static struct rte_dpaa_driver rte_dpaa_pmd;
124
125 static void
126 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
127
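/* Default INITFQ options shared by poll-mode Rx queues: avoid blocking,
 * prefer-in-cache, and context-A stashing of data and context (annotation
 * stashing is skipped on LS1046A-family SoCs, where stashing hurts multicore
 * performance).
 */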
128 static inline void
129 dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
130 {
131         memset(opts, 0, sizeof(struct qm_mcc_initfq));
132         opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
133         opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
134                            QM_FQCTRL_PREFERINCACHE;
135         opts->fqd.context_a.stashing.exclusive = 0;
136         if (dpaa_svr_family != SVR_LS1046A_FAMILY)
137                 opts->fqd.context_a.stashing.annotation_cl =
138                                                 DPAA_IF_RX_ANNOTATION_STASH;
139         opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
140         opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
141 }
142
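/* Set the MTU by programming the FMAN maximum frame length.  The frame size
 * is the MTU plus the Ethernet header, CRC and one VLAN tag, e.g. an MTU of
 * 1500 gives 1500 + 14 + 4 + 4 = 1522 bytes; anything above ETHER_MAX_LEN
 * turns on the jumbo frame offload flag.
 */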
143 static int
144 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
145 {
146         struct dpaa_if *dpaa_intf = dev->data->dev_private;
147         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
148                                 + VLAN_TAG_SIZE;
149
150         PMD_INIT_FUNC_TRACE();
151
152         if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
153                 return -EINVAL;
154         if (frame_size > ETHER_MAX_LEN)
155                 dev->data->dev_conf.rxmode.offloads |=
156                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
157         else
158                 dev->data->dev_conf.rxmode.offloads &=
159                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
160
161         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
162
163         fman_if_set_maxfrm(dpaa_intf->fif, frame_size);
164
165         return 0;
166 }
167
168 static int
169 dpaa_eth_dev_configure(struct rte_eth_dev *dev)
170 {
171         struct dpaa_if *dpaa_intf = dev->data->dev_private;
172         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
173         uint64_t rx_offloads = eth_conf->rxmode.offloads;
174         uint64_t tx_offloads = eth_conf->txmode.offloads;
175
176         PMD_INIT_FUNC_TRACE();
177
178         /* Rx offloads validation */
179         if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
180                 DPAA_PMD_ERR(
181                 "Rx offloads not supported - requested 0x%" PRIx64
182                 " supported 0x%" PRIx64,
183                         rx_offloads,
184                         dev_rx_offloads_sup | dev_rx_offloads_nodis);
185                 return -ENOTSUP;
186         }
187         if (dev_rx_offloads_nodis & ~rx_offloads) {
188                 DPAA_PMD_WARN(
189                 "Rx offloads non-configurable - requested 0x%" PRIx64
190                 " ignored 0x%" PRIx64,
191                         rx_offloads, dev_rx_offloads_nodis);
192         }
193
194         /* Tx offloads validation */
195         if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
196                 DPAA_PMD_ERR(
197                 "Tx offloads not supported - requested 0x%" PRIx64
198                 " supported 0x%" PRIx64,
199                         tx_offloads,
200                         dev_tx_offloads_sup | dev_tx_offloads_nodis);
201                 return -ENOTSUP;
202         }
203         if (dev_tx_offloads_nodis & ~tx_offloads) {
204                 DPAA_PMD_WARN(
205                 "Tx offloads non-configurable - requested 0x%" PRIx64
206                 " ignored 0x%" PRIx64,
207                         tx_offloads, dev_tx_offloads_nodis);
208         }
209
210         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
211                 if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
212                     DPAA_MAX_RX_PKT_LEN) {
213                         fman_if_set_maxfrm(dpaa_intf->fif,
214                                 dev->data->dev_conf.rxmode.max_rx_pkt_len);
215                         return 0;
216                 } else {
217                         return -1;
218                 }
219         }
220         return 0;
221 }
222
223 static const uint32_t *
224 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
225 {
226         static const uint32_t ptypes[] = {
227                 /* todo: add more types */
228                 RTE_PTYPE_L2_ETHER,
229                 RTE_PTYPE_L3_IPV4,
230                 RTE_PTYPE_L3_IPV4_EXT,
231                 RTE_PTYPE_L3_IPV6,
232                 RTE_PTYPE_L3_IPV6_EXT,
233                 RTE_PTYPE_L4_TCP,
234                 RTE_PTYPE_L4_UDP,
235                 RTE_PTYPE_L4_SCTP
236         };
237
238         PMD_INIT_FUNC_TRACE();
239
240         if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
241                 return ptypes;
242         return NULL;
243 }
244
245 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
246 {
247         struct dpaa_if *dpaa_intf = dev->data->dev_private;
248
249         PMD_INIT_FUNC_TRACE();
250
251         /* Change tx callback to the real one */
252         dev->tx_pkt_burst = dpaa_eth_queue_tx;
253         fman_if_enable_rx(dpaa_intf->fif);
254
255         return 0;
256 }
257
258 static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
259 {
260         struct dpaa_if *dpaa_intf = dev->data->dev_private;
261
262         PMD_INIT_FUNC_TRACE();
263
264         fman_if_disable_rx(dpaa_intf->fif);
265         dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
266 }
267
268 static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
269 {
270         PMD_INIT_FUNC_TRACE();
271
272         dpaa_eth_dev_stop(dev);
273 }
274
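/* Report the firmware/silicon version as "SVR:<svr>-fman-v<rev>", reading the
 * SVR from DPAA_SOC_ID_FILE.  If the supplied buffer is too small, the number
 * of bytes required (including the terminating '\0') is returned instead.
 */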
275 static int
276 dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
277                      char *fw_version,
278                      size_t fw_size)
279 {
280         int ret;
281         FILE *svr_file = NULL;
282         unsigned int svr_ver = 0;
283
284         PMD_INIT_FUNC_TRACE();
285
286         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
287         if (!svr_file) {
288                 DPAA_PMD_ERR("Unable to open SoC device");
289                 return -ENOTSUP; /* Not supported on this infra */
290         }
291         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
292                 dpaa_svr_family = svr_ver & SVR_MASK;
293         else
294                 DPAA_PMD_ERR("Unable to read SoC device");
295
296         fclose(svr_file);
297
298         ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
299                        svr_ver, fman_ip_rev);
300         ret += 1; /* add the size of '\0' */
301
302         if (fw_size < (uint32_t)ret)
303                 return ret;
304         else
305                 return 0;
306 }
307
308 static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
309                               struct rte_eth_dev_info *dev_info)
310 {
311         struct dpaa_if *dpaa_intf = dev->data->dev_private;
312
313         PMD_INIT_FUNC_TRACE();
314
315         dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
316         dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
317         dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
318         dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
319         dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
320         dev_info->max_hash_mac_addrs = 0;
321         dev_info->max_vfs = 0;
322         dev_info->max_vmdq_pools = ETH_16_POOLS;
323         dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
324         dev_info->speed_capa = (ETH_LINK_SPEED_1G |
325                                 ETH_LINK_SPEED_10G);
326         dev_info->rx_offload_capa = dev_rx_offloads_sup |
327                                         dev_rx_offloads_nodis;
328         dev_info->tx_offload_capa = dev_tx_offloads_sup |
329                                         dev_tx_offloads_nodis;
330 }
331
332 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
333                                 int wait_to_complete __rte_unused)
334 {
335         struct dpaa_if *dpaa_intf = dev->data->dev_private;
336         struct rte_eth_link *link = &dev->data->dev_link;
337
338         PMD_INIT_FUNC_TRACE();
339
340         if (dpaa_intf->fif->mac_type == fman_mac_1g)
341                 link->link_speed = ETH_SPEED_NUM_1G;
342         else if (dpaa_intf->fif->mac_type == fman_mac_10g)
343                 link->link_speed = ETH_SPEED_NUM_10G;
344         else
345                 DPAA_PMD_ERR("invalid link_speed: %s, %d",
346                              dpaa_intf->name, dpaa_intf->fif->mac_type);
347
348         link->link_status = dpaa_intf->valid;
349         link->link_duplex = ETH_LINK_FULL_DUPLEX;
350         link->link_autoneg = ETH_LINK_AUTONEG;
351         return 0;
352 }
353
354 static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
355                                struct rte_eth_stats *stats)
356 {
357         struct dpaa_if *dpaa_intf = dev->data->dev_private;
358
359         PMD_INIT_FUNC_TRACE();
360
361         fman_if_stats_get(dpaa_intf->fif, stats);
362         return 0;
363 }
364
365 static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
366 {
367         struct dpaa_if *dpaa_intf = dev->data->dev_private;
368
369         PMD_INIT_FUNC_TRACE();
370
371         fman_if_stats_reset(dpaa_intf->fif);
372 }
373
374 static int
375 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
376                     unsigned int n)
377 {
378         struct dpaa_if *dpaa_intf = dev->data->dev_private;
379         unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
380         uint64_t values[sizeof(struct dpaa_if_stats) / 8];
381
382         if (xstats == NULL)
383                 return 0;
384
385         if (n < num)
386                 return num;
387
388         fman_if_stats_get_all(dpaa_intf->fif, values,
389                               sizeof(struct dpaa_if_stats) / 8);
390
391         for (i = 0; i < num; i++) {
392                 xstats[i].id = i;
393                 xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
394         }
395         return i;
396 }
397
398 static int
399 dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
400                       struct rte_eth_xstat_name *xstats_names,
401                       unsigned int limit)
402 {
403         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
404
405         if (limit < stat_cnt)
406                 return stat_cnt;
407
408         if (xstats_names != NULL)
409                 for (i = 0; i < stat_cnt; i++)
410                         snprintf(xstats_names[i].name,
411                                  sizeof(xstats_names[i].name),
412                                  "%s",
413                                  dpaa_xstats_strings[i].name);
414
415         return stat_cnt;
416 }
417
418 static int
419 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
420                       uint64_t *values, unsigned int n)
421 {
422         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
423         uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
424
425         if (!ids) {
426                 struct dpaa_if *dpaa_intf = dev->data->dev_private;
427
428                 if (n < stat_cnt)
429                         return stat_cnt;
430
431                 if (!values)
432                         return 0;
433
434                 fman_if_stats_get_all(dpaa_intf->fif, values_copy,
435                                       sizeof(struct dpaa_if_stats) / 8);
436
437                 for (i = 0; i < stat_cnt; i++)
438                         values[i] =
439                                 values_copy[dpaa_xstats_strings[i].offset / 8];
440
441                 return stat_cnt;
442         }
443
444         dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
445
446         for (i = 0; i < n; i++) {
447                 if (ids[i] >= stat_cnt) {
448                         DPAA_PMD_ERR("id value isn't valid");
449                         return -1;
450                 }
451                 values[i] = values_copy[ids[i]];
452         }
453         return n;
454 }
455
456 static int
457 dpaa_xstats_get_names_by_id(
458         struct rte_eth_dev *dev,
459         struct rte_eth_xstat_name *xstats_names,
460         const uint64_t *ids,
461         unsigned int limit)
462 {
463         unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
464         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
465
466         if (!ids)
467                 return dpaa_xstats_get_names(dev, xstats_names, limit);
468
469         dpaa_xstats_get_names(dev, xstats_names_copy, limit);
470
471         for (i = 0; i < limit; i++) {
472                 if (ids[i] >= stat_cnt) {
473                         DPAA_PMD_ERR("id value isn't valid");
474                         return -1;
475                 }
476                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
477         }
478         return limit;
479 }
480
481 static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
482 {
483         struct dpaa_if *dpaa_intf = dev->data->dev_private;
484
485         PMD_INIT_FUNC_TRACE();
486
487         fman_if_promiscuous_enable(dpaa_intf->fif);
488 }
489
490 static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
491 {
492         struct dpaa_if *dpaa_intf = dev->data->dev_private;
493
494         PMD_INIT_FUNC_TRACE();
495
496         fman_if_promiscuous_disable(dpaa_intf->fif);
497 }
498
499 static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
500 {
501         struct dpaa_if *dpaa_intf = dev->data->dev_private;
502
503         PMD_INIT_FUNC_TRACE();
504
505         fman_if_set_mcast_filter_table(dpaa_intf->fif);
506 }
507
508 static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
509 {
510         struct dpaa_if *dpaa_intf = dev->data->dev_private;
511
512         PMD_INIT_FUNC_TRACE();
513
514         fman_if_reset_mcast_filter_table(dpaa_intf->fif);
515 }
516
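/* Set up an Rx queue.  On the first queue of a port this also binds the
 * mempool to the FMAN interface (internal context, frame descriptor offset
 * and buffer pool parameters).  While push-mode slots remain (see
 * dpaa_push_mode_max_queue), the queue additionally gets a dedicated pool
 * channel and is re-initialised as a statically scheduled FQ with DQRR
 * callbacks; otherwise it stays in the default poll mode.  If a congestion
 * group is in use, its tail-drop threshold is sized to nb_desc frames.
 */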
517 static
518 int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
519                             uint16_t nb_desc,
520                             unsigned int socket_id __rte_unused,
521                             const struct rte_eth_rxconf *rx_conf __rte_unused,
522                             struct rte_mempool *mp)
523 {
524         struct dpaa_if *dpaa_intf = dev->data->dev_private;
525         struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
526         struct qm_mcc_initfq opts = {0};
527         u32 flags = 0;
528         int ret;
529
530         PMD_INIT_FUNC_TRACE();
531
532         DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
533
534         if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
535                 struct fman_if_ic_params icp;
536                 uint32_t fd_offset;
537                 uint32_t bp_size;
538
539                 if (!mp->pool_data) {
540                         DPAA_PMD_ERR("Not an offloaded buffer pool!");
541                         return -1;
542                 }
543                 dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
544
545                 memset(&icp, 0, sizeof(icp));
546                 /* set ICIOF, ICEOF and ICSZ to their default values */
547                 icp.iciof = DEFAULT_ICIOF;
548                 icp.iceof = DEFAULT_RX_ICEOF;
549                 icp.icsz = DEFAULT_ICSZ;
550                 fman_if_set_ic_params(dpaa_intf->fif, &icp);
551
552                 fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
553                 fman_if_set_fdoff(dpaa_intf->fif, fd_offset);
554
555                 /* Buffer pool size should be equal to the dataroom size */
556                 bp_size = rte_pktmbuf_data_room_size(mp);
557                 fman_if_set_bp(dpaa_intf->fif, mp->size,
558                                dpaa_intf->bp_info->bpid, bp_size);
559                 dpaa_intf->valid = 1;
560                 DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
561                             dpaa_intf->name, fd_offset,
562                         fman_if_get_fdoff(dpaa_intf->fif));
563         }
564         /* check whether this queue should use push mode; no error check for now */
565         if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
566                 dpaa_push_queue_idx++;
567                 opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
568                 opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
569                                    QM_FQCTRL_CTXASTASHING |
570                                    QM_FQCTRL_PREFERINCACHE;
571                 opts.fqd.context_a.stashing.exclusive = 0;
572                 /* In a multicore scenario stashing becomes a bottleneck on
573                  * LS1046 SoCs, so do not enable stashing in this case.
574                  */
575                 if (dpaa_svr_family != SVR_LS1046A_FAMILY)
576                         opts.fqd.context_a.stashing.annotation_cl =
577                                                 DPAA_IF_RX_ANNOTATION_STASH;
578                 opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
579                 opts.fqd.context_a.stashing.context_cl =
580                                                 DPAA_IF_RX_CONTEXT_STASH;
581
582                 /* Create a channel and associate the given queue with it */
583                 qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
584                 opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
585                 opts.fqd.dest.channel = rxq->ch_id;
586                 opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
587                 flags = QMAN_INITFQ_FLAG_SCHED;
588
589                 /* Configure tail drop */
590                 if (dpaa_intf->cgr_rx) {
591                         opts.we_mask |= QM_INITFQ_WE_CGID;
592                         opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
593                         opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
594                 }
595                 ret = qman_init_fq(rxq, flags, &opts);
596                 if (ret)
597                         DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
598                                      " ret: %d", rxq->fqid, ret);
599                 rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
600                 rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
601                 rxq->is_static = true;
602         }
603         dev->data->rx_queues[queue_idx] = rxq;
604
605         /* configure the CGR size as per the desc size */
606         if (dpaa_intf->cgr_rx) {
607                 struct qm_mcc_initcgr cgr_opts = {0};
608
609                 /* Enable tail drop with cgr on this queue */
610                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
611                 ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
612                 if (ret) {
613                         DPAA_PMD_WARN(
614                                 "rx taildrop modify fail on fqid %d (ret=%d)",
615                                 rxq->fqid, ret);
616                 }
617         }
618
619         return 0;
620 }
621
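/* Attach an Rx queue to an event device channel.  Atomic scheduling maps to
 * HOLDACTIVE dequeues, parallel scheduling keeps AVOIDBLOCK, and ordered
 * scheduling is not supported.  The event configuration is cached in the FQ
 * so it can be filled into packets at dequeue time.
 */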
622 int __rte_experimental
623 dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
624                 int eth_rx_queue_id,
625                 u16 ch_id,
626                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
627 {
628         int ret;
629         u32 flags = 0;
630         struct dpaa_if *dpaa_intf = dev->data->dev_private;
631         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
632         struct qm_mcc_initfq opts = {0};
633
634         if (dpaa_push_mode_max_queue)
635                 DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
636                               "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
637                               dpaa_push_mode_max_queue);
638
639         dpaa_poll_queue_default_config(&opts);
640
641         switch (queue_conf->ev.sched_type) {
642         case RTE_SCHED_TYPE_ATOMIC:
643                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
644                 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
645                  * configuration with HOLD_ACTIVE setting
646                  */
647                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
648                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
649                 break;
650         case RTE_SCHED_TYPE_ORDERED:
651                 DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
652                 return -1;
653         default:
654                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
655                 rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
656                 break;
657         }
658
659         opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
660         opts.fqd.dest.channel = ch_id;
661         opts.fqd.dest.wq = queue_conf->ev.priority;
662
663         if (dpaa_intf->cgr_rx) {
664                 opts.we_mask |= QM_INITFQ_WE_CGID;
665                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
666                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
667         }
668
669         flags = QMAN_INITFQ_FLAG_SCHED;
670
671         ret = qman_init_fq(rxq, flags, &opts);
672         if (ret) {
673                 DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
674                              rxq->fqid, ret);
675                 return ret;
676         }
677
678         /* copy configuration which needs to be filled during dequeue */
679         memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
680         dev->data->rx_queues[eth_rx_queue_id] = rxq;
681
682         return ret;
683 }
684
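/* Detach an Rx queue from the event device and restore the default poll-mode
 * FQ configuration.
 */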
685 int __rte_experimental
686 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
687                 int eth_rx_queue_id)
688 {
689         struct qm_mcc_initfq opts;
690         int ret;
691         u32 flags = 0;
692         struct dpaa_if *dpaa_intf = dev->data->dev_private;
693         struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
694
695         dpaa_poll_queue_default_config(&opts);
696
697         if (dpaa_intf->cgr_rx) {
698                 opts.we_mask |= QM_INITFQ_WE_CGID;
699                 opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
700                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
701         }
702
703         ret = qman_init_fq(rxq, flags, &opts);
704         if (ret) {
705                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
706                              rxq->fqid, ret);
707         }
708
709         rxq->cb.dqrr_dpdk_cb = NULL;
710         dev->data->rx_queues[eth_rx_queue_id] = NULL;
711
712         return 0;
713 }
714
715 static
716 void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
717 {
718         PMD_INIT_FUNC_TRACE();
719 }
720
721 static
722 int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
723                             uint16_t nb_desc __rte_unused,
724                 unsigned int socket_id __rte_unused,
725                 const struct rte_eth_txconf *tx_conf __rte_unused)
726 {
727         struct dpaa_if *dpaa_intf = dev->data->dev_private;
728
729         PMD_INIT_FUNC_TRACE();
730
731         DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
732         dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
733         return 0;
734 }
735
736 static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
737 {
738         PMD_INIT_FUNC_TRACE();
739 }
740
741 static uint32_t
742 dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
743 {
744         struct dpaa_if *dpaa_intf = dev->data->dev_private;
745         struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
746         u32 frm_cnt = 0;
747
748         PMD_INIT_FUNC_TRACE();
749
750         if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
751                 RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
752                         rx_queue_id, frm_cnt);
753         }
754         return frm_cnt;
755 }
756
757 static int dpaa_link_down(struct rte_eth_dev *dev)
758 {
759         PMD_INIT_FUNC_TRACE();
760
761         dpaa_eth_dev_stop(dev);
762         return 0;
763 }
764
765 static int dpaa_link_up(struct rte_eth_dev *dev)
766 {
767         PMD_INIT_FUNC_TRACE();
768
769         dpaa_eth_dev_start(dev);
770         return 0;
771 }
772
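/* Apply a flow control configuration: for TX_PAUSE or FULL modes the high/low
 * watermarks and pause quanta are programmed into the FMAN interface, and the
 * settings are cached in the private data for later flow_ctrl_get() calls.
 */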
773 static int
774 dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
775                    struct rte_eth_fc_conf *fc_conf)
776 {
777         struct dpaa_if *dpaa_intf = dev->data->dev_private;
778         struct rte_eth_fc_conf *net_fc;
779
780         PMD_INIT_FUNC_TRACE();
781
782         if (!(dpaa_intf->fc_conf)) {
783                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
784                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
785                 if (!dpaa_intf->fc_conf) {
786                         DPAA_PMD_ERR("unable to save flow control info");
787                         return -ENOMEM;
788                 }
789         }
790         net_fc = dpaa_intf->fc_conf;
791
792         if (fc_conf->high_water < fc_conf->low_water) {
793                 DPAA_PMD_ERR("Incorrect Flow Control Configuration");
794                 return -EINVAL;
795         }
796
797         if (fc_conf->mode == RTE_FC_NONE) {
798                 return 0;
799         } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
800                  fc_conf->mode == RTE_FC_FULL) {
801                 fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
802                                          fc_conf->low_water,
803                                 dpaa_intf->bp_info->bpid);
804                 if (fc_conf->pause_time)
805                         fman_if_set_fc_quanta(dpaa_intf->fif,
806                                               fc_conf->pause_time);
807         }
808
809         /* Save the information in dpaa device */
810         net_fc->pause_time = fc_conf->pause_time;
811         net_fc->high_water = fc_conf->high_water;
812         net_fc->low_water = fc_conf->low_water;
813         net_fc->send_xon = fc_conf->send_xon;
814         net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
815         net_fc->mode = fc_conf->mode;
816         net_fc->autoneg = fc_conf->autoneg;
817
818         return 0;
819 }
820
821 static int
822 dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
823                    struct rte_eth_fc_conf *fc_conf)
824 {
825         struct dpaa_if *dpaa_intf = dev->data->dev_private;
826         struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
827         int ret;
828
829         PMD_INIT_FUNC_TRACE();
830
831         if (net_fc) {
832                 fc_conf->pause_time = net_fc->pause_time;
833                 fc_conf->high_water = net_fc->high_water;
834                 fc_conf->low_water = net_fc->low_water;
835                 fc_conf->send_xon = net_fc->send_xon;
836                 fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
837                 fc_conf->mode = net_fc->mode;
838                 fc_conf->autoneg = net_fc->autoneg;
839                 return 0;
840         }
841         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
842         if (ret) {
843                 fc_conf->mode = RTE_FC_TX_PAUSE;
844                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
845         } else {
846                 fc_conf->mode = RTE_FC_NONE;
847         }
848
849         return 0;
850 }
851
852 static int
853 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
854                              struct ether_addr *addr,
855                              uint32_t index,
856                              __rte_unused uint32_t pool)
857 {
858         int ret;
859         struct dpaa_if *dpaa_intf = dev->data->dev_private;
860
861         PMD_INIT_FUNC_TRACE();
862
863         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
864
865         if (ret)
866                 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
867                         " err = %d", ret);
868         return 0;
869 }
870
871 static void
872 dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
873                           uint32_t index)
874 {
875         struct dpaa_if *dpaa_intf = dev->data->dev_private;
876
877         PMD_INIT_FUNC_TRACE();
878
879         fman_if_clear_mac_addr(dpaa_intf->fif, index);
880 }
881
882 static int
883 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
884                        struct ether_addr *addr)
885 {
886         int ret;
887         struct dpaa_if *dpaa_intf = dev->data->dev_private;
888
889         PMD_INIT_FUNC_TRACE();
890
891         ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
892         if (ret)
893                 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
894
895         return ret;
896 }
897
898 static struct eth_dev_ops dpaa_devops = {
899         .dev_configure            = dpaa_eth_dev_configure,
900         .dev_start                = dpaa_eth_dev_start,
901         .dev_stop                 = dpaa_eth_dev_stop,
902         .dev_close                = dpaa_eth_dev_close,
903         .dev_infos_get            = dpaa_eth_dev_info,
904         .dev_supported_ptypes_get = dpaa_supported_ptypes_get,
905
906         .rx_queue_setup           = dpaa_eth_rx_queue_setup,
907         .tx_queue_setup           = dpaa_eth_tx_queue_setup,
908         .rx_queue_release         = dpaa_eth_rx_queue_release,
909         .tx_queue_release         = dpaa_eth_tx_queue_release,
910         .rx_queue_count           = dpaa_dev_rx_queue_count,
911
912         .flow_ctrl_get            = dpaa_flow_ctrl_get,
913         .flow_ctrl_set            = dpaa_flow_ctrl_set,
914
915         .link_update              = dpaa_eth_link_update,
916         .stats_get                = dpaa_eth_stats_get,
917         .xstats_get               = dpaa_dev_xstats_get,
918         .xstats_get_by_id         = dpaa_xstats_get_by_id,
919         .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
920         .xstats_get_names         = dpaa_xstats_get_names,
921         .xstats_reset             = dpaa_eth_stats_reset,
922         .stats_reset              = dpaa_eth_stats_reset,
923         .promiscuous_enable       = dpaa_eth_promiscuous_enable,
924         .promiscuous_disable      = dpaa_eth_promiscuous_disable,
925         .allmulticast_enable      = dpaa_eth_multicast_enable,
926         .allmulticast_disable     = dpaa_eth_multicast_disable,
927         .mtu_set                  = dpaa_mtu_set,
928         .dev_set_link_down        = dpaa_link_down,
929         .dev_set_link_up          = dpaa_link_up,
930         .mac_addr_add             = dpaa_dev_add_mac_addr,
931         .mac_addr_remove          = dpaa_dev_remove_mac_addr,
932         .mac_addr_set             = dpaa_dev_set_mac_addr,
933
934         .fw_version_get           = dpaa_fw_version_get,
935 };
936
937 static bool
938 is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
939 {
940         if (strcmp(dev->device->driver->name,
941                    drv->driver.name))
942                 return false;
943
944         return true;
945 }
946
947 static bool
948 is_dpaa_supported(struct rte_eth_dev *dev)
949 {
950         return is_device_supported(dev, &rte_dpaa_pmd);
951 }
952
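/* Enable or disable MAC loopback on a DPAA port.  A minimal usage sketch from
 * an application (the port id 0 is only an example):
 *
 *   rte_pmd_dpaa_set_tx_loopback(0, 1);   // loop Tx traffic back to Rx
 *   rte_pmd_dpaa_set_tx_loopback(0, 0);   // back to normal operation
 */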
953 int __rte_experimental
954 rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
955 {
956         struct rte_eth_dev *dev;
957         struct dpaa_if *dpaa_intf;
958
959         RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
960
961         dev = &rte_eth_devices[port];
962
963         if (!is_dpaa_supported(dev))
964                 return -ENOTSUP;
965
966         dpaa_intf = dev->data->dev_private;
967
968         if (on)
969                 fman_if_loopback_enable(dpaa_intf->fif);
970         else
971                 fman_if_loopback_disable(dpaa_intf->fif);
972
973         return 0;
974 }
975
976 static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
977 {
978         struct rte_eth_fc_conf *fc_conf;
979         int ret;
980
981         PMD_INIT_FUNC_TRACE();
982
983         if (!(dpaa_intf->fc_conf)) {
984                 dpaa_intf->fc_conf = rte_zmalloc(NULL,
985                         sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
986                 if (!dpaa_intf->fc_conf) {
987                         DPAA_PMD_ERR("unable to save flow control info");
988                         return -ENOMEM;
989                 }
990         }
991         fc_conf = dpaa_intf->fc_conf;
992         ret = fman_if_get_fc_threshold(dpaa_intf->fif);
993         if (ret) {
994                 fc_conf->mode = RTE_FC_TX_PAUSE;
995                 fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
996         } else {
997                 fc_conf->mode = RTE_FC_NONE;
998         }
999
1000         return 0;
1001 }
1002
1003 /* Initialise an Rx FQ */
1004 static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
1005                               uint32_t fqid)
1006 {
1007         struct qm_mcc_initfq opts = {0};
1008         int ret;
1009         u32 flags = 0;
1010         struct qm_mcc_initcgr cgr_opts = {
1011                 .we_mask = QM_CGR_WE_CS_THRES |
1012                                 QM_CGR_WE_CSTD_EN |
1013                                 QM_CGR_WE_MODE,
1014                 .cgr = {
1015                         .cstd_en = QM_CGR_EN,
1016                         .mode = QMAN_CGR_MODE_FRAME
1017                 }
1018         };
1019
1020         PMD_INIT_FUNC_TRACE();
1021
1022         ret = qman_reserve_fqid(fqid);
1023         if (ret) {
1024                 DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
1025                              fqid, ret);
1026                 return -EINVAL;
1027         }
1028
1029         DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
1030         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1031         if (ret) {
1032                 DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
1033                         fqid, ret);
1034                 return ret;
1035         }
1036         fq->is_static = false;
1037
1038         dpaa_poll_queue_default_config(&opts);
1039
1040         if (cgr_rx) {
1041                 /* Enable tail drop with cgr on this queue */
1042                 qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
1043                 cgr_rx->cb = NULL;
1044                 ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
1045                                       &cgr_opts);
1046                 if (ret) {
1047                         DPAA_PMD_WARN(
1048                                 "rx taildrop init fail on rx fqid %d (ret=%d)",
1049                                 fqid, ret);
1050                         goto without_cgr;
1051                 }
1052                 opts.we_mask |= QM_INITFQ_WE_CGID;
1053                 opts.fqd.cgid = cgr_rx->cgrid;
1054                 opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
1055         }
1056 without_cgr:
1057         ret = qman_init_fq(fq, flags, &opts);
1058         if (ret)
1059                 DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
1060         return ret;
1061 }
1062
1063 /* Initialise a Tx FQ */
1064 static int dpaa_tx_queue_init(struct qman_fq *fq,
1065                               struct fman_if *fman_intf)
1066 {
1067         struct qm_mcc_initfq opts = {0};
1068         int ret;
1069
1070         PMD_INIT_FUNC_TRACE();
1071
1072         ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
1073                              QMAN_FQ_FLAG_TO_DCPORTAL, fq);
1074         if (ret) {
1075                 DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
1076                 return ret;
1077         }
1078         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
1079                        QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
1080         opts.fqd.dest.channel = fman_intf->tx_channel_id;
1081         opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
1082         opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
1083         opts.fqd.context_b = 0;
1084         /* no tx-confirmation */
1085         opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
1086         opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
1087         DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
1088         ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
1089         if (ret)
1090                 DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
1091         return ret;
1092 }
1093
1094 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1095 /* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
1096 static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
1097 {
1098         struct qm_mcc_initfq opts = {0};
1099         int ret;
1100
1101         PMD_INIT_FUNC_TRACE();
1102
1103         ret = qman_reserve_fqid(fqid);
1104         if (ret) {
1105                 DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
1106                         fqid, ret);
1107                 return -EINVAL;
1108         }
1109         /* "map" this Rx FQ to one of the interface's Tx FQIDs */
1110         DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
1111         ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
1112         if (ret) {
1113                 DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
1114                         fqid, ret);
1115                 return ret;
1116         }
1117         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
1118         opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
1119         ret = qman_init_fq(fq, 0, &opts);
1120         if (ret)
1121                 DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
1122                             fqid, ret);
1123         return ret;
1124 }
1125 #endif
1126
1127 /* Initialise a network interface */
1128 static int
1129 dpaa_dev_init(struct rte_eth_dev *eth_dev)
1130 {
1131         int num_cores, num_rx_fqs, fqid;
1132         int loop, ret = 0;
1133         int dev_id;
1134         struct rte_dpaa_device *dpaa_device;
1135         struct dpaa_if *dpaa_intf;
1136         struct fm_eth_port_cfg *cfg;
1137         struct fman_if *fman_intf;
1138         struct fman_if_bpool *bp, *tmp_bp;
1139         uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
1140
1141         PMD_INIT_FUNC_TRACE();
1142
1143         /* For secondary processes, the primary has done all the work */
1144         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1145                 return 0;
1146
1147         dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
1148         dev_id = dpaa_device->id.dev_id;
1149         dpaa_intf = eth_dev->data->dev_private;
1150         cfg = &dpaa_netcfg->port_cfg[dev_id];
1151         fman_intf = cfg->fman_if;
1152
1153         dpaa_intf->name = dpaa_device->name;
1154
1155         /* save fman_if & cfg in the interface structure */
1156         dpaa_intf->fif = fman_intf;
1157         dpaa_intf->ifid = dev_id;
1158         dpaa_intf->cfg = cfg;
1159
1160         /* Initialize Rx FQ's */
1161         if (getenv("DPAA_NUM_RX_QUEUES"))
1162                 num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
1163         else
1164                 num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
1165
1166         /* Check whether push mode queues are to be enabled. Currently we
1167          * allow only one queue per thread.
1168          */
1169         if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
1170                 dpaa_push_mode_max_queue =
1171                                 atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
1172                 if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
1173                         dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
1174         }
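        /* Both knobs above are plain environment variables; a usage sketch
         * (the values are only examples):
         *
         *   export DPAA_NUM_RX_QUEUES=4        # Rx FQs per interface
         *   export DPAA_PUSH_QUEUES_NUMBER=2   # cap on push-mode queues, 0 disables
         */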
1175
1176         /* Each device cannot have more than DPAA_MAX_NUM_PCD_QUEUES RX
1177          * queues.
1178          */
1179         if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
1180                 DPAA_PMD_ERR("Invalid number of RX queues\n");
1181                 return -EINVAL;
1182         }
1183
1184         dpaa_intf->rx_queues = rte_zmalloc(NULL,
1185                 sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
1186         if (!dpaa_intf->rx_queues) {
1187                 DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
1188                 return -ENOMEM;
1189         }
1190
1191         /* If congestion control is enabled globally */
1192         if (td_threshold) {
1193                 dpaa_intf->cgr_rx = rte_zmalloc(NULL,
1194                         sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
1195                 if (!dpaa_intf->cgr_rx) {
1196                         DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
1197                         ret = -ENOMEM;
1198                         goto free_rx;
1199                 }
1200
1201                 ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
1202                 if (ret != num_rx_fqs) {
1203                         DPAA_PMD_WARN("insufficient CGRIDs available");
1204                         ret = -EINVAL;
1205                         goto free_rx;
1206                 }
1207         } else {
1208                 dpaa_intf->cgr_rx = NULL;
1209         }
1210
1211         for (loop = 0; loop < num_rx_fqs; loop++) {
1212                 fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
1213                         DPAA_PCD_FQID_MULTIPLIER + loop;
1214
1215                 if (dpaa_intf->cgr_rx)
1216                         dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
1217
1218                 ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
1219                         dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
1220                         fqid);
1221                 if (ret)
1222                         goto free_rx;
1223                 dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
1224         }
1225         dpaa_intf->nb_rx_queues = num_rx_fqs;
1226
1227         /* Initialise Tx FQs. Have as many Tx FQs as the number of cores */
1228         num_cores = rte_lcore_count();
1229         dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
1230                 num_cores, MAX_CACHELINE);
1231         if (!dpaa_intf->tx_queues) {
1232                 DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
1233                 ret = -ENOMEM;
1234                 goto free_rx;
1235         }
1236
1237         for (loop = 0; loop < num_cores; loop++) {
1238                 ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
1239                                          fman_intf);
1240                 if (ret)
1241                         goto free_tx;
1242                 dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
1243         }
1244         dpaa_intf->nb_tx_queues = num_cores;
1245
1246 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
1247         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1248                 DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
1249         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
1250         dpaa_debug_queue_init(&dpaa_intf->debug_queues[
1251                 DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
1252         dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
1253 #endif
1254
1255         DPAA_PMD_DEBUG("All frame queues created");
1256
1257         /* Get the initial configuration for flow control */
1258         dpaa_fc_set_default(dpaa_intf);
1259
1260         /* reset bpool list, initialize bpool dynamically */
1261         list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
1262                 list_del(&bp->node);
1263                 free(bp);
1264         }
1265
1266         /* Populate ethdev structure */
1267         eth_dev->dev_ops = &dpaa_devops;
1268         eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
1269         eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
1270
1271         /* Allocate memory for storing MAC addresses */
1272         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
1273                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
1274         if (eth_dev->data->mac_addrs == NULL) {
1275                 DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
1276                                                 "store MAC addresses",
1277                                 ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
1278                 ret = -ENOMEM;
1279                 goto free_tx;
1280         }
1281
1282         /* copy the primary mac address */
1283         ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
1284
1285         RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
1286                 dpaa_device->name,
1287                 fman_intf->mac_addr.addr_bytes[0],
1288                 fman_intf->mac_addr.addr_bytes[1],
1289                 fman_intf->mac_addr.addr_bytes[2],
1290                 fman_intf->mac_addr.addr_bytes[3],
1291                 fman_intf->mac_addr.addr_bytes[4],
1292                 fman_intf->mac_addr.addr_bytes[5]);
1293
1294         /* Disable RX mode */
1295         fman_if_discard_rx_errors(fman_intf);
1296         fman_if_disable_rx(fman_intf);
1297         /* Disable promiscuous mode */
1298         fman_if_promiscuous_disable(fman_intf);
1299         /* Disable multicast */
1300         fman_if_reset_mcast_filter_table(fman_intf);
1301         /* Reset interface statistics */
1302         fman_if_stats_reset(fman_intf);
1303
1304         return 0;
1305
1306 free_tx:
1307         rte_free(dpaa_intf->tx_queues);
1308         dpaa_intf->tx_queues = NULL;
1309         dpaa_intf->nb_tx_queues = 0;
1310
1311 free_rx:
1312         rte_free(dpaa_intf->cgr_rx);
1313         rte_free(dpaa_intf->rx_queues);
1314         dpaa_intf->rx_queues = NULL;
1315         dpaa_intf->nb_rx_queues = 0;
1316         return ret;
1317 }
1318
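/* Tear down a network interface: stop the port, free the cached flow control
 * configuration, delete and release the Rx congestion groups, and free the
 * queue arrays and MAC address table.
 */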
1319 static int
1320 dpaa_dev_uninit(struct rte_eth_dev *dev)
1321 {
1322         struct dpaa_if *dpaa_intf = dev->data->dev_private;
1323         int loop;
1324
1325         PMD_INIT_FUNC_TRACE();
1326
1327         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1328                 return -EPERM;
1329
1330         if (!dpaa_intf) {
1331                 DPAA_PMD_WARN("Already closed or not started");
1332                 return -1;
1333         }
1334
1335         dpaa_eth_dev_close(dev);
1336
1337         /* release configuration memory */
1338         if (dpaa_intf->fc_conf)
1339                 rte_free(dpaa_intf->fc_conf);
1340
1341         /* Release RX congestion Groups */
1342         if (dpaa_intf->cgr_rx) {
1343                 for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
1344                         qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
1345
1346                 qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
1347                                          dpaa_intf->nb_rx_queues);
1348         }
1349
1350         rte_free(dpaa_intf->cgr_rx);
1351         dpaa_intf->cgr_rx = NULL;
1352
1353         rte_free(dpaa_intf->rx_queues);
1354         dpaa_intf->rx_queues = NULL;
1355
1356         rte_free(dpaa_intf->tx_queues);
1357         dpaa_intf->tx_queues = NULL;
1358
1359         /* free memory for storing MAC addresses */
1360         rte_free(dev->data->mac_addrs);
1361         dev->data->mac_addrs = NULL;
1362
1363         dev->dev_ops = NULL;
1364         dev->rx_pkt_burst = NULL;
1365         dev->tx_pkt_burst = NULL;
1366
1367         return 0;
1368 }
1369
1370 static int
1371 rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
1372                struct rte_dpaa_device *dpaa_dev)
1373 {
1374         int diag;
1375         int ret;
1376         struct rte_eth_dev *eth_dev;
1377
1378         PMD_INIT_FUNC_TRACE();
1379
1380         /* In case of secondary process, the device is already configured
1381          * and no further action is required, except portal initialization
1382          * and verifying secondary attachment to port name.
1383          */
1384         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1385                 eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
1386                 if (!eth_dev)
1387                         return -ENOMEM;
1388                 return 0;
1389         }
1390
1391         if (!is_global_init) {
1392                 /* One time load of Qman/Bman drivers */
1393                 ret = qman_global_init();
1394                 if (ret) {
1395                         DPAA_PMD_ERR("QMAN initialization failed: %d",
1396                                      ret);
1397                         return ret;
1398                 }
1399                 ret = bman_global_init();
1400                 if (ret) {
1401                         DPAA_PMD_ERR("BMAN initialization failed: %d",
1402                                      ret);
1403                         return ret;
1404                 }
1405
1406                 is_global_init = 1;
1407         }
1408
1409         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1410                 ret = rte_dpaa_portal_init((void *)1);
1411                 if (ret) {
1412                         DPAA_PMD_ERR("Unable to initialize portal");
1413                         return ret;
1414                 }
1415         }
1416
1417         eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
1418         if (eth_dev == NULL)
1419                 return -ENOMEM;
1420
1421         eth_dev->data->dev_private = rte_zmalloc(
1422                                         "ethdev private structure",
1423                                         sizeof(struct dpaa_if),
1424                                         RTE_CACHE_LINE_SIZE);
1425         if (!eth_dev->data->dev_private) {
1426                 DPAA_PMD_ERR("Cannot allocate memzone for port data");
1427                 rte_eth_dev_release_port(eth_dev);
1428                 return -ENOMEM;
1429         }
1430
1431         eth_dev->device = &dpaa_dev->device;
1432         eth_dev->device->driver = &dpaa_drv->driver;
1433         dpaa_dev->eth_dev = eth_dev;
1434
1435         /* Invoke PMD device initialization function */
1436         diag = dpaa_dev_init(eth_dev);
1437         if (diag == 0)
1438                 return 0;
1439
1440         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1441                 rte_free(eth_dev->data->dev_private);
1442
1443         rte_eth_dev_release_port(eth_dev);
1444         return diag;
1445 }
1446
1447 static int
1448 rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
1449 {
1450         struct rte_eth_dev *eth_dev;
1451
1452         PMD_INIT_FUNC_TRACE();
1453
1454         eth_dev = dpaa_dev->eth_dev;
1455         dpaa_dev_uninit(eth_dev);
1456
1457         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1458                 rte_free(eth_dev->data->dev_private);
1459
1460         rte_eth_dev_release_port(eth_dev);
1461
1462         return 0;
1463 }
1464
1465 static struct rte_dpaa_driver rte_dpaa_pmd = {
1466         .drv_type = FSL_DPAA_ETH,
1467         .probe = rte_dpaa_probe,
1468         .remove = rte_dpaa_remove,
1469 };
1470
1471 RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);