ethdev: return diagnostic when setting MAC address
dpdk.git: drivers/net/dpaa/dpaa_ethdev.c

/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>

/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
/* At present we only allow up to 4 push mode queues, as each such queue
 * needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       4

static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Number of queues currently in push mode */


/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

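/* The push-mode queue budget can be tuned per process through the
 * DPAA_PUSH_QUEUES_NUMBER environment variable, which dpaa_dev_init()
 * reads at probe time; an illustrative shell invocation:
 *
 *   export DPAA_PUSH_QUEUES_NUMBER=2
 *
 * Values above DPAA_MAX_PUSH_MODE_QUEUE are clamped to that maximum.
 */
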
struct rte_dpaa_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
        {"rx_align_err",
                offsetof(struct dpaa_if_stats, raln)},
        {"rx_valid_pause",
                offsetof(struct dpaa_if_stats, rxpf)},
        {"rx_fcs_err",
                offsetof(struct dpaa_if_stats, rfcs)},
        {"rx_vlan_frame",
                offsetof(struct dpaa_if_stats, rvlan)},
        {"rx_frame_err",
                offsetof(struct dpaa_if_stats, rerr)},
        {"rx_drop_err",
                offsetof(struct dpaa_if_stats, rdrp)},
        {"rx_undersized",
                offsetof(struct dpaa_if_stats, rund)},
        {"rx_oversize_err",
                offsetof(struct dpaa_if_stats, rovr)},
        {"rx_fragment_pkt",
                offsetof(struct dpaa_if_stats, rfrg)},
        {"tx_valid_pause",
                offsetof(struct dpaa_if_stats, txpf)},
        {"tx_fcs_err",
                offsetof(struct dpaa_if_stats, terr)},
        {"tx_vlan_frame",
                offsetof(struct dpaa_if_stats, tvlan)},
        {"tx_undersized",
                offsetof(struct dpaa_if_stats, tund)},
};

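/* Each entry in dpaa_xstats_strings names one 64-bit hardware counter of
 * struct dpaa_if_stats; dividing its byte offset by 8 gives the index into
 * the u64 array filled by fman_if_stats_get_all(), which is how
 * dpaa_dev_xstats_get() below resolves each value:
 *
 *   xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 */
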
static struct rte_dpaa_driver rte_dpaa_pmd;

static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
        memset(opts, 0, sizeof(struct qm_mcc_initfq));
        opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
        opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
                           QM_FQCTRL_PREFERINCACHE;
        opts->fqd.context_a.stashing.exclusive = 0;
        if (dpaa_svr_family != SVR_LS1046A_FAMILY)
                opts->fqd.context_a.stashing.annotation_cl =
                                                DPAA_IF_RX_ANNOTATION_STASH;
        opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
        opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

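/* A worked example of the frame-size arithmetic below: mtu = 1500 gives
 * frame_size = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) +
 * VLAN_TAG_SIZE (4) = 1522, which already exceeds ETHER_MAX_LEN (1518)
 * because the VLAN tag is counted in, so the jumbo flag is set.
 */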
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
                                + VLAN_TAG_SIZE;

        PMD_INIT_FUNC_TRACE();

        if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
                return -EINVAL;
        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
        else
                dev->data->dev_conf.rxmode.jumbo_frame = 0;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        fman_if_set_maxfrm(dpaa_intf->fif, frame_size);

        return 0;
}

static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
                if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
                    DPAA_MAX_RX_PKT_LEN) {
                        fman_if_set_maxfrm(dpaa_intf->fif,
                                dev->data->dev_conf.rxmode.max_rx_pkt_len);
                        return 0;
                } else {
                        return -1;
                }
        }
        return 0;
}

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* todo: add more types */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP
        };

        PMD_INIT_FUNC_TRACE();

        if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
                return ptypes;
        return NULL;
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        /* Change tx callback to the real one */
        dev->tx_pkt_burst = dpaa_eth_queue_tx;
        fman_if_enable_rx(dpaa_intf->fif);

        return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_disable_rx(dpaa_intf->fif);
        dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        dpaa_eth_dev_stop(dev);
}

static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
                     char *fw_version,
                     size_t fw_size)
{
        int ret;
        FILE *svr_file = NULL;
        unsigned int svr_ver = 0;

        PMD_INIT_FUNC_TRACE();

        svr_file = fopen(DPAA_SOC_ID_FILE, "r");
        if (!svr_file) {
                DPAA_PMD_ERR("Unable to open SoC device");
                return -ENOTSUP; /* Not supported on this infra */
        }
        if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
                dpaa_svr_family = svr_ver & SVR_MASK;
        else
                DPAA_PMD_ERR("Unable to read SoC device");

        fclose(svr_file);

        ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
                       svr_ver, fman_ip_rev);
        ret += 1; /* add the size of '\0' */

        if (fw_size < (uint32_t)ret)
                return ret;
        else
                return 0;
}

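/* dpaa_fw_version_get() above reports a string of the form
 * "SVR:<svr>-fman-v<rev>" with both fields in hex; following the ethdev
 * fw_version_get() convention, a positive return value tells the caller
 * how large fw_size must be for a retry instead of truncating silently.
 */
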
static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
        dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
        dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
        dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
        dev_info->max_vmdq_pools = ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
        dev_info->speed_capa = (ETH_LINK_SPEED_1G |
                                ETH_LINK_SPEED_10G);
        dev_info->rx_offload_capa =
                (DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM);
        dev_info->tx_offload_capa =
                (DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM  |
                DEV_TX_OFFLOAD_TCP_CKSUM);
}

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete __rte_unused)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_eth_link *link = &dev->data->dev_link;

        PMD_INIT_FUNC_TRACE();

        if (dpaa_intf->fif->mac_type == fman_mac_1g)
                link->link_speed = ETH_SPEED_NUM_1G;
        else if (dpaa_intf->fif->mac_type == fman_mac_10g)
                link->link_speed = ETH_SPEED_NUM_10G;
        else
                DPAA_PMD_ERR("invalid link_speed: %s, %d",
                             dpaa_intf->name, dpaa_intf->fif->mac_type);

        link->link_status = dpaa_intf->valid;
        link->link_duplex = ETH_LINK_FULL_DUPLEX;
        link->link_autoneg = ETH_LINK_AUTONEG;
        return 0;
}

static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
                              struct rte_eth_stats *stats)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_stats_get(dpaa_intf->fif, stats);
        return 0;
}

static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_stats_reset(dpaa_intf->fif);
}

static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                    unsigned int n)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
        uint64_t values[sizeof(struct dpaa_if_stats) / 8];

        if (xstats == NULL)
                return 0;

        if (n < num)
                return num;

        fman_if_stats_get_all(dpaa_intf->fif, values,
                              sizeof(struct dpaa_if_stats) / 8);

        for (i = 0; i < num; i++) {
                xstats[i].id = i;
                xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
        }
        return i;
}

static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                      struct rte_eth_xstat_name *xstats_names,
                      unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

        if (limit < stat_cnt)
                return stat_cnt;

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        snprintf(xstats_names[i].name,
                                 sizeof(xstats_names[i].name),
                                 "%s",
                                 dpaa_xstats_strings[i].name);

        return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
                      uint64_t *values, unsigned int n)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
        uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

        if (!ids) {
                struct dpaa_if *dpaa_intf = dev->data->dev_private;

                if (n < stat_cnt)
                        return stat_cnt;

                if (!values)
                        return 0;

                fman_if_stats_get_all(dpaa_intf->fif, values_copy,
                                      sizeof(struct dpaa_if_stats) / 8);

                for (i = 0; i < stat_cnt; i++)
                        values[i] =
                                values_copy[dpaa_xstats_strings[i].offset / 8];

                return stat_cnt;
        }

        dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

        for (i = 0; i < n; i++) {
                if (ids[i] >= stat_cnt) {
                        DPAA_PMD_ERR("id value isn't valid");
                        return -1;
                }
                values[i] = values_copy[ids[i]];
        }
        return n;
}

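/* A minimal application-side usage sketch for the by-id path above
 * (illustrative only; assumes port_id is a started DPAA port and id 0 is
 * valid as reported by rte_eth_xstats_get_names()):
 *
 *   uint64_t id = 0, value;
 *   if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *       printf("%s = %" PRIu64 "\n", dpaa_xstats_strings[0].name, value);
 */
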
static int
dpaa_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
        struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
        unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
        struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

        if (!ids)
                return dpaa_xstats_get_names(dev, xstats_names, limit);

        dpaa_xstats_get_names(dev, xstats_names_copy, limit);

        for (i = 0; i < limit; i++) {
                if (ids[i] >= stat_cnt) {
                        DPAA_PMD_ERR("id value isn't valid");
                        return -1;
                }
                strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
        }
        return limit;
}

static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_promiscuous_enable(dpaa_intf->fif);
}

static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_promiscuous_disable(dpaa_intf->fif);
}

static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_set_mcast_filter_table(dpaa_intf->fif);
}

static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_reset_mcast_filter_table(dpaa_intf->fif);
}

static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                            uint16_t nb_desc,
                            unsigned int socket_id __rte_unused,
                            const struct rte_eth_rxconf *rx_conf __rte_unused,
                            struct rte_mempool *mp)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
        struct qm_mcc_initfq opts = {0};
        u32 flags = 0;
        int ret;

        PMD_INIT_FUNC_TRACE();

        DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);

        if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
                struct fman_if_ic_params icp;
                uint32_t fd_offset;
                uint32_t bp_size;

                if (!mp->pool_data) {
                        DPAA_PMD_ERR("Not an offloaded buffer pool!");
                        return -1;
                }
                dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

                memset(&icp, 0, sizeof(icp));
                /* set ICEOF to the default value, which is 0 */
                icp.iciof = DEFAULT_ICIOF;
                icp.iceof = DEFAULT_RX_ICEOF;
                icp.icsz = DEFAULT_ICSZ;
                fman_if_set_ic_params(dpaa_intf->fif, &icp);

                fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
                fman_if_set_fdoff(dpaa_intf->fif, fd_offset);

                /* Buffer pool size should be equal to the dataroom size */
                bp_size = rte_pktmbuf_data_room_size(mp);
                fman_if_set_bp(dpaa_intf->fif, mp->size,
                               dpaa_intf->bp_info->bpid, bp_size);
                dpaa_intf->valid = 1;
                DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
                              dpaa_intf->name, fd_offset,
                              fman_if_get_fdoff(dpaa_intf->fif));
        }
        /* Check whether this queue can still use push mode; no error check
         * for now.
         */
        if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
                dpaa_push_queue_idx++;
                opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
                opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
                                   QM_FQCTRL_CTXASTASHING |
                                   QM_FQCTRL_PREFERINCACHE;
                opts.fqd.context_a.stashing.exclusive = 0;
                /* In a multicore scenario stashing becomes a bottleneck on
                 * LS1046, so do not enable stashing in this case.
                 */
                if (dpaa_svr_family != SVR_LS1046A_FAMILY)
                        opts.fqd.context_a.stashing.annotation_cl =
                                                DPAA_IF_RX_ANNOTATION_STASH;
                opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
                opts.fqd.context_a.stashing.context_cl =
                                                DPAA_IF_RX_CONTEXT_STASH;

                /* Create a channel and associate the given queue with it */
                qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
                opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
                opts.fqd.dest.channel = rxq->ch_id;
                opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
                flags = QMAN_INITFQ_FLAG_SCHED;

                /* Configure tail drop */
                if (dpaa_intf->cgr_rx) {
                        opts.we_mask |= QM_INITFQ_WE_CGID;
                        opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
                        opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
                }
                ret = qman_init_fq(rxq, flags, &opts);
                if (ret)
                        DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
                                     " ret: %d", rxq->fqid, ret);
                rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
                rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
                rxq->is_static = true;
        }
        dev->data->rx_queues[queue_idx] = rxq;

        /* Configure the CGR size as per the desc size */
        if (dpaa_intf->cgr_rx) {
                struct qm_mcc_initcgr cgr_opts = {0};

                /* Enable tail drop with the CGR attached to this queue */
                qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
                ret = qman_modify_cgr(&dpaa_intf->cgr_rx[queue_idx], 0,
                                      &cgr_opts);
                if (ret) {
                        DPAA_PMD_WARN(
                                "rx taildrop modify fail on fqid %d (ret=%d)",
                                rxq->fqid, ret);
                }
        }

        return 0;
}

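/* Note on the two Rx modes configured by dpaa_eth_rx_queue_setup() above:
 * the first dpaa_push_mode_max_queue queues are made "static" (push mode),
 * each owning a freshly allocated pool channel whose dequeues QMan pushes
 * to a dedicated portal via the dqrr_dpdk_pull_cb/dqrr_prepare callbacks;
 * any remaining queues stay in the default poll (pull) configuration from
 * dpaa_poll_queue_default_config().
 */
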
int __rte_experimental
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id,
                u16 ch_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        int ret;
        u32 flags = 0;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
        struct qm_mcc_initfq opts = {0};

        if (dpaa_push_mode_max_queue)
                DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
                              "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
                              dpaa_push_mode_max_queue);

        dpaa_poll_queue_default_config(&opts);

        switch (queue_conf->ev.sched_type) {
        case RTE_SCHED_TYPE_ATOMIC:
                opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
                /* Reset the FQCTRL_AVOIDBLOCK bit as it is unnecessary
                 * with the HOLD_ACTIVE setting.
                 */
                opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
                rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
                break;
        case RTE_SCHED_TYPE_ORDERED:
                DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
                return -1;
        default:
                opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
                rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
                break;
        }

        opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
        opts.fqd.dest.channel = ch_id;
        opts.fqd.dest.wq = queue_conf->ev.priority;

        if (dpaa_intf->cgr_rx) {
                opts.we_mask |= QM_INITFQ_WE_CGID;
                opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
                opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        }

        flags = QMAN_INITFQ_FLAG_SCHED;

        ret = qman_init_fq(rxq, flags, &opts);
        if (ret) {
                DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
                             rxq->fqid, ret);
                return ret;
        }

        /* copy the configuration which needs to be filled during dequeue */
        memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
        dev->data->rx_queues[eth_rx_queue_id] = rxq;

        return ret;
}

int __rte_experimental
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id)
{
        struct qm_mcc_initfq opts;
        int ret;
        u32 flags = 0;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

        dpaa_poll_queue_default_config(&opts);

        if (dpaa_intf->cgr_rx) {
                opts.we_mask |= QM_INITFQ_WE_CGID;
                opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
                opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        }

        ret = qman_init_fq(rxq, flags, &opts);
        if (ret) {
                DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
                             rxq->fqid, ret);
        }

        rxq->cb.dqrr_dpdk_cb = NULL;
        dev->data->rx_queues[eth_rx_queue_id] = NULL;

        return 0;
}

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                            uint16_t nb_desc __rte_unused,
                            unsigned int socket_id __rte_unused,
                            const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
        dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
        return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
        u32 frm_cnt = 0;

        PMD_INIT_FUNC_TRACE();

        if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
                RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
                        rx_queue_id, frm_cnt);
        }
        return frm_cnt;
}

static int dpaa_link_down(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        dpaa_eth_dev_stop(dev);
        return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();

        dpaa_eth_dev_start(dev);
        return 0;
}

static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
                   struct rte_eth_fc_conf *fc_conf)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_eth_fc_conf *net_fc;

        PMD_INIT_FUNC_TRACE();

        if (!(dpaa_intf->fc_conf)) {
                dpaa_intf->fc_conf = rte_zmalloc(NULL,
                        sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
                if (!dpaa_intf->fc_conf) {
                        DPAA_PMD_ERR("unable to save flow control info");
                        return -ENOMEM;
                }
        }
        net_fc = dpaa_intf->fc_conf;

        if (fc_conf->high_water < fc_conf->low_water) {
                DPAA_PMD_ERR("Incorrect Flow Control Configuration");
                return -EINVAL;
        }

        if (fc_conf->mode == RTE_FC_NONE) {
                return 0;
        } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
                   fc_conf->mode == RTE_FC_FULL) {
                fman_if_set_fc_threshold(dpaa_intf->fif, fc_conf->high_water,
                                         fc_conf->low_water,
                                         dpaa_intf->bp_info->bpid);
                if (fc_conf->pause_time)
                        fman_if_set_fc_quanta(dpaa_intf->fif,
                                              fc_conf->pause_time);
        }

        /* Save the information in the dpaa device */
        net_fc->pause_time = fc_conf->pause_time;
        net_fc->high_water = fc_conf->high_water;
        net_fc->low_water = fc_conf->low_water;
        net_fc->send_xon = fc_conf->send_xon;
        net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
        net_fc->mode = fc_conf->mode;
        net_fc->autoneg = fc_conf->autoneg;

        return 0;
}

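/* In dpaa_flow_ctrl_set() above, RTE_FC_NONE returns before anything is
 * cached and only RTE_FC_TX_PAUSE/RTE_FC_FULL program the FMAN thresholds;
 * RTE_FC_RX_PAUSE is merely recorded in dpaa_intf->fc_conf, from where
 * dpaa_flow_ctrl_get() reports the last accepted configuration.
 */
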
static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
                   struct rte_eth_fc_conf *fc_conf)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (net_fc) {
                fc_conf->pause_time = net_fc->pause_time;
                fc_conf->high_water = net_fc->high_water;
                fc_conf->low_water = net_fc->low_water;
                fc_conf->send_xon = net_fc->send_xon;
                fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
                fc_conf->mode = net_fc->mode;
                fc_conf->autoneg = net_fc->autoneg;
                return 0;
        }
        ret = fman_if_get_fc_threshold(dpaa_intf->fif);
        if (ret) {
                fc_conf->mode = RTE_FC_TX_PAUSE;
                fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
        } else {
                fc_conf->mode = RTE_FC_NONE;
        }

        return 0;
}

static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
                      struct ether_addr *addr,
                      uint32_t index,
                      __rte_unused uint32_t pool)
{
        int ret;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);

        if (ret)
                RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
                        " err = %d", ret);
        return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
                         uint32_t index)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        fman_if_clear_mac_addr(dpaa_intf->fif, index);
}

static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
                      struct ether_addr *addr)
{
        int ret;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
        if (ret)
                RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);

        return ret;
}

static struct eth_dev_ops dpaa_devops = {
        .dev_configure            = dpaa_eth_dev_configure,
        .dev_start                = dpaa_eth_dev_start,
        .dev_stop                 = dpaa_eth_dev_stop,
        .dev_close                = dpaa_eth_dev_close,
        .dev_infos_get            = dpaa_eth_dev_info,
        .dev_supported_ptypes_get = dpaa_supported_ptypes_get,

        .rx_queue_setup           = dpaa_eth_rx_queue_setup,
        .tx_queue_setup           = dpaa_eth_tx_queue_setup,
        .rx_queue_release         = dpaa_eth_rx_queue_release,
        .tx_queue_release         = dpaa_eth_tx_queue_release,
        .rx_queue_count           = dpaa_dev_rx_queue_count,

        .flow_ctrl_get            = dpaa_flow_ctrl_get,
        .flow_ctrl_set            = dpaa_flow_ctrl_set,

        .link_update              = dpaa_eth_link_update,
        .stats_get                = dpaa_eth_stats_get,
        .xstats_get               = dpaa_dev_xstats_get,
        .xstats_get_by_id         = dpaa_xstats_get_by_id,
        .xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
        .xstats_get_names         = dpaa_xstats_get_names,
        .xstats_reset             = dpaa_eth_stats_reset,
        .stats_reset              = dpaa_eth_stats_reset,
        .promiscuous_enable       = dpaa_eth_promiscuous_enable,
        .promiscuous_disable      = dpaa_eth_promiscuous_disable,
        .allmulticast_enable      = dpaa_eth_multicast_enable,
        .allmulticast_disable     = dpaa_eth_multicast_disable,
        .mtu_set                  = dpaa_mtu_set,
        .dev_set_link_down        = dpaa_link_down,
        .dev_set_link_up          = dpaa_link_up,
        .mac_addr_add             = dpaa_dev_add_mac_addr,
        .mac_addr_remove          = dpaa_dev_remove_mac_addr,
        .mac_addr_set             = dpaa_dev_set_mac_addr,

        .fw_version_get           = dpaa_fw_version_get,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
{
        if (strcmp(dev->device->driver->name,
                   drv->driver.name))
                return false;

        return true;
}

static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
        return is_device_supported(dev, &rte_dpaa_pmd);
}

int __rte_experimental
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
        struct rte_eth_dev *dev;
        struct dpaa_if *dpaa_intf;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

        dev = &rte_eth_devices[port];

        if (!is_dpaa_supported(dev))
                return -ENOTSUP;

        dpaa_intf = dev->data->dev_private;

        if (on)
                fman_if_loopback_enable(dpaa_intf->fif);
        else
                fman_if_loopback_disable(dpaa_intf->fif);

        return 0;
}

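/* Usage sketch for the PMD-specific API above (illustrative only; assumes
 * port 0 is bound to this driver, see rte_pmd_dpaa.h for the prototype):
 *
 *   int rc = rte_pmd_dpaa_set_tx_loopback(0, 1);
 *   if (rc == -ENOTSUP)
 *       printf("port 0 is not a DPAA port\n");
 */
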
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
        struct rte_eth_fc_conf *fc_conf;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (!(dpaa_intf->fc_conf)) {
                dpaa_intf->fc_conf = rte_zmalloc(NULL,
                        sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
                if (!dpaa_intf->fc_conf) {
                        DPAA_PMD_ERR("unable to save flow control info");
                        return -ENOMEM;
                }
        }
        fc_conf = dpaa_intf->fc_conf;
        ret = fman_if_get_fc_threshold(dpaa_intf->fif);
        if (ret) {
                fc_conf->mode = RTE_FC_TX_PAUSE;
                fc_conf->pause_time = fman_if_get_fc_quanta(dpaa_intf->fif);
        } else {
                fc_conf->mode = RTE_FC_NONE;
        }

        return 0;
}

/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
                              uint32_t fqid)
{
        struct qm_mcc_initfq opts = {0};
        int ret;
        u32 flags = 0;
        struct qm_mcc_initcgr cgr_opts = {
                .we_mask = QM_CGR_WE_CS_THRES |
                                QM_CGR_WE_CSTD_EN |
                                QM_CGR_WE_MODE,
                .cgr = {
                        .cstd_en = QM_CGR_EN,
                        .mode = QMAN_CGR_MODE_FRAME
                }
        };

        PMD_INIT_FUNC_TRACE();

        ret = qman_reserve_fqid(fqid);
        if (ret) {
                DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
                             fqid, ret);
                return -EINVAL;
        }

        DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
        ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
        if (ret) {
                DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
                             fqid, ret);
                return ret;
        }
        fq->is_static = false;

        dpaa_poll_queue_default_config(&opts);

        if (cgr_rx) {
                /* Enable tail drop with cgr on this queue */
                qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
                cgr_rx->cb = NULL;
                ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
                                      &cgr_opts);
                if (ret) {
                        DPAA_PMD_WARN(
                                "rx taildrop init fail on rx fqid %d (ret=%d)",
                                fqid, ret);
                        goto without_cgr;
                }
                opts.we_mask |= QM_INITFQ_WE_CGID;
                opts.fqd.cgid = cgr_rx->cgrid;
                opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        }
without_cgr:
        ret = qman_init_fq(fq, flags, &opts);
        if (ret)
                DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
        return ret;
}

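/* Tail drop in dpaa_rx_queue_init() is frame-count based: the CGR is armed
 * with td_threshold (CGR_RX_PERFQ_THRESH) frames in QMAN_CGR_MODE_FRAME,
 * and a CGR creation failure is non-fatal: the FQ is still initialised,
 * just without a congestion group (the without_cgr path).
 */
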
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
                              struct fman_if *fman_intf)
{
        struct qm_mcc_initfq opts = {0};
        int ret;

        PMD_INIT_FUNC_TRACE();

        ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
                             QMAN_FQ_FLAG_TO_DCPORTAL, fq);
        if (ret) {
                DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
                return ret;
        }
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
        opts.fqd.dest.channel = fman_intf->tx_channel_id;
        opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
        opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
        opts.fqd.context_b = 0;
        /* no tx-confirmation */
        opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
        opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
        DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
        ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
        if (ret)
                DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
        return ret;
}

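/* Unlike the Rx FQs, which claim fixed reserved FQIDs, each Tx FQ above is
 * created with QMAN_FQ_FLAG_DYNAMIC_FQID and targets a direct-connect
 * portal (QMAN_FQ_FLAG_TO_DCPORTAL): its destination is the interface's
 * FMAN Tx channel, so enqueued frames go straight to the MAC.
 */
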
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
        struct qm_mcc_initfq opts = {0};
        int ret;

        PMD_INIT_FUNC_TRACE();

        ret = qman_reserve_fqid(fqid);
        if (ret) {
                DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
                             fqid, ret);
                return -EINVAL;
        }
        /* "map" this Rx FQ to one of the interface's Tx FQIDs */
        DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
        ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
        if (ret) {
                DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
                             fqid, ret);
                return ret;
        }
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
        opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
        ret = qman_init_fq(fq, 0, &opts);
        if (ret)
                DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
                             fqid, ret);
        return ret;
}
#endif

/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
        int num_cores, num_rx_fqs, fqid;
        int loop, ret = 0;
        int dev_id;
        struct rte_dpaa_device *dpaa_device;
        struct dpaa_if *dpaa_intf;
        struct fm_eth_port_cfg *cfg;
        struct fman_if *fman_intf;
        struct fman_if_bpool *bp, *tmp_bp;
        uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];

        PMD_INIT_FUNC_TRACE();

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
        dev_id = dpaa_device->id.dev_id;
        dpaa_intf = eth_dev->data->dev_private;
        cfg = &dpaa_netcfg->port_cfg[dev_id];
        fman_intf = cfg->fman_if;

        dpaa_intf->name = dpaa_device->name;

        /* save fman_if & cfg in the interface structure */
        dpaa_intf->fif = fman_intf;
        dpaa_intf->ifid = dev_id;
        dpaa_intf->cfg = cfg;

        /* Initialize Rx FQs */
        if (getenv("DPAA_NUM_RX_QUEUES"))
                num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
        else
                num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;

        /* If push mode queues are to be enabled. Currently we allow only
         * one queue per thread.
         */
        if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
                dpaa_push_mode_max_queue =
                                atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
                if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
                        dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
        }

        /* Each device cannot have more than DPAA_MAX_NUM_PCD_QUEUES RX
         * queues.
         */
        if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
                DPAA_PMD_ERR("Invalid number of RX queues\n");
                return -EINVAL;
        }

        dpaa_intf->rx_queues = rte_zmalloc(NULL,
                sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
        if (!dpaa_intf->rx_queues) {
                DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
                return -ENOMEM;
        }

        /* If congestion control is enabled globally */
        if (td_threshold) {
                dpaa_intf->cgr_rx = rte_zmalloc(NULL,
                        sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
                if (!dpaa_intf->cgr_rx) {
                        DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
                        ret = -ENOMEM;
                        goto free_rx;
                }

                ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
                if (ret != num_rx_fqs) {
                        DPAA_PMD_WARN("insufficient CGRIDs available");
                        ret = -EINVAL;
                        goto free_rx;
                }
        } else {
                dpaa_intf->cgr_rx = NULL;
        }

        for (loop = 0; loop < num_rx_fqs; loop++) {
                fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
                        DPAA_PCD_FQID_MULTIPLIER + loop;

                if (dpaa_intf->cgr_rx)
                        dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

                ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
                        dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
                        fqid);
                if (ret)
                        goto free_rx;
                dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
        }
        dpaa_intf->nb_rx_queues = num_rx_fqs;

        /* Initialise Tx FQs. Have as many Tx FQs as there are cores */
        num_cores = rte_lcore_count();
        dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
                num_cores, MAX_CACHELINE);
        if (!dpaa_intf->tx_queues) {
                DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
                ret = -ENOMEM;
                goto free_rx;
        }

        for (loop = 0; loop < num_cores; loop++) {
                ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
                                         fman_intf);
                if (ret)
                        goto free_tx;
                dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
        }
        dpaa_intf->nb_tx_queues = num_cores;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
        dpaa_debug_queue_init(&dpaa_intf->debug_queues[
                DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
        dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
        dpaa_debug_queue_init(&dpaa_intf->debug_queues[
                DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
        dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

        DPAA_PMD_DEBUG("All frame queues created");

        /* Get the initial configuration for flow control */
        dpaa_fc_set_default(dpaa_intf);

        /* reset bpool list, initialize bpool dynamically */
        list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
                list_del(&bp->node);
                free(bp);
        }

        /* Populate ethdev structure */
        eth_dev->dev_ops = &dpaa_devops;
        eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
        eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
                ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
                             "store MAC addresses",
                             ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
                ret = -ENOMEM;
                goto free_tx;
        }

        /* copy the primary mac address */
        ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);

        RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
                dpaa_device->name,
                fman_intf->mac_addr.addr_bytes[0],
                fman_intf->mac_addr.addr_bytes[1],
                fman_intf->mac_addr.addr_bytes[2],
                fman_intf->mac_addr.addr_bytes[3],
                fman_intf->mac_addr.addr_bytes[4],
                fman_intf->mac_addr.addr_bytes[5]);

        /* Disable RX mode */
        fman_if_discard_rx_errors(fman_intf);
        fman_if_disable_rx(fman_intf);
        /* Disable promiscuous mode */
        fman_if_promiscuous_disable(fman_intf);
        /* Disable multicast */
        fman_if_reset_mcast_filter_table(fman_intf);
        /* Reset interface statistics */
        fman_if_stats_reset(fman_intf);

        return 0;

free_tx:
        rte_free(dpaa_intf->tx_queues);
        dpaa_intf->tx_queues = NULL;
        dpaa_intf->nb_tx_queues = 0;

free_rx:
        rte_free(dpaa_intf->cgr_rx);
        rte_free(dpaa_intf->rx_queues);
        dpaa_intf->rx_queues = NULL;
        dpaa_intf->nb_rx_queues = 0;
        return ret;
}

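/* A deployment note for dpaa_dev_init() above (illustrative shell usage):
 * the number of PCD Rx queues can be overridden per process with
 *
 *   export DPAA_NUM_RX_QUEUES=4
 *
 * subject to the 1..DPAA_MAX_NUM_PCD_QUEUES bound checked in the function;
 * the FQIDs claimed follow
 * DPAA_PCD_FQID_START + ifid * DPAA_PCD_FQID_MULTIPLIER + n.
 */
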
static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        int loop;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -EPERM;

        if (!dpaa_intf) {
                DPAA_PMD_WARN("Already closed or not started");
                return -1;
        }

        dpaa_eth_dev_close(dev);

        /* release configuration memory */
        if (dpaa_intf->fc_conf)
                rte_free(dpaa_intf->fc_conf);

        /* Release RX congestion Groups */
        if (dpaa_intf->cgr_rx) {
                for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
                        qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

                /* The CGR IDs were allocated as one contiguous range, so
                 * release the range starting from the first one.
                 */
                qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
                                         dpaa_intf->nb_rx_queues);
        }

        rte_free(dpaa_intf->cgr_rx);
        dpaa_intf->cgr_rx = NULL;

        rte_free(dpaa_intf->rx_queues);
        dpaa_intf->rx_queues = NULL;

        rte_free(dpaa_intf->tx_queues);
        dpaa_intf->tx_queues = NULL;

        /* free memory for storing MAC addresses */
        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        return 0;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
               struct rte_dpaa_device *dpaa_dev)
{
        int diag;
        int ret;
        struct rte_eth_dev *eth_dev;

        PMD_INIT_FUNC_TRACE();

        /* In case of a secondary process, the device is already configured
         * and no further action is required, except portal initialization
         * and verifying secondary attachment to the port name.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
                if (!eth_dev)
                        return -ENOMEM;
                return 0;
        }

        if (!is_global_init) {
                /* One time load of Qman/Bman drivers */
                ret = qman_global_init();
                if (ret) {
                        DPAA_PMD_ERR("QMAN initialization failed: %d",
                                     ret);
                        return ret;
                }
                ret = bman_global_init();
                if (ret) {
                        DPAA_PMD_ERR("BMAN initialization failed: %d",
                                     ret);
                        return ret;
                }

                is_global_init = 1;
        }

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)1);
                if (ret) {
                        DPAA_PMD_ERR("Unable to initialize portal");
                        return ret;
                }
        }

        eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
        if (eth_dev == NULL)
                return -ENOMEM;

        eth_dev->data->dev_private = rte_zmalloc(
                                        "ethdev private structure",
                                        sizeof(struct dpaa_if),
                                        RTE_CACHE_LINE_SIZE);
        if (!eth_dev->data->dev_private) {
                DPAA_PMD_ERR("Cannot allocate memzone for port data");
                rte_eth_dev_release_port(eth_dev);
                return -ENOMEM;
        }

        eth_dev->device = &dpaa_dev->device;
        eth_dev->device->driver = &dpaa_drv->driver;
        dpaa_dev->eth_dev = eth_dev;

        /* Invoke PMD device initialization function */
        diag = dpaa_dev_init(eth_dev);
        if (diag == 0)
                return 0;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_eth_dev *eth_dev;

        PMD_INIT_FUNC_TRACE();

        eth_dev = dpaa_dev->eth_dev;
        dpaa_dev_uninit(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
        .drv_type = FSL_DPAA_ETH,
        .probe = rte_dpaa_probe,
        .remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);