/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaa_mempool.h>

#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
#include <rte_pmd_dpaa.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <fsl_fman.h>
#include <process.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_RSS_HASH;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MULTI_SEGS;

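/*
 * The "nodis" offloads are always performed by the FMan hardware and cannot
 * be switched off: dpaa_eth_dev_configure() only logs when an application
 * did not request them, and dpaa_eth_dev_info() reports them as part of the
 * device capabilities.
 */
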
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int default_q;	/* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues by default, as each such
 * queue needs a dedicated portal and we are short of portals.
 */
#define DPAA_MAX_PUSH_MODE_QUEUE       8
#define DPAA_DEFAULT_PUSH_MODE_QUEUE   4

static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx;	/* Number of Rx queues currently in push mode */

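/*
 * In push (static dequeue) mode an Rx queue gets its own QMan channel and
 * portal, so frames are pushed to the core instead of being pulled with
 * per-burst volatile dequeue commands; queues beyond dpaa_push_mode_max_queue
 * fall back to the polled configuration set up by
 * dpaa_poll_queue_default_config() below.
 */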

/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;

/* Per TX FQ Taildrop in frame count, disabled by default */
static unsigned int td_tx_threshold;

struct rte_dpaa_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset;
};

static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
	{"rx_align_err",
		offsetof(struct dpaa_if_stats, raln)},
	{"rx_valid_pause",
		offsetof(struct dpaa_if_stats, rxpf)},
	{"rx_fcs_err",
		offsetof(struct dpaa_if_stats, rfcs)},
	{"rx_vlan_frame",
		offsetof(struct dpaa_if_stats, rvlan)},
	{"rx_frame_err",
		offsetof(struct dpaa_if_stats, rerr)},
	{"rx_drop_err",
		offsetof(struct dpaa_if_stats, rdrp)},
	{"rx_undersized",
		offsetof(struct dpaa_if_stats, rund)},
	{"rx_oversize_err",
		offsetof(struct dpaa_if_stats, rovr)},
	{"rx_fragment_pkt",
		offsetof(struct dpaa_if_stats, rfrg)},
	{"tx_valid_pause",
		offsetof(struct dpaa_if_stats, txpf)},
	{"tx_fcs_err",
		offsetof(struct dpaa_if_stats, terr)},
	{"tx_vlan_frame",
		offsetof(struct dpaa_if_stats, tvlan)},
	{"tx_undersized",
		offsetof(struct dpaa_if_stats, tund)},
};

static struct rte_dpaa_driver rte_dpaa_pmd;

static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused);

static void dpaa_interrupt_handler(void *param);

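/*
 * Default pull-mode Rx FQ configuration: avoid blocking, prefer-in-cache and
 * context-A stashing, with annotation stashing left disabled on LS1046A,
 * where it hurts multicore performance (see the note in
 * dpaa_eth_rx_queue_setup()).
 */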
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
	memset(opts, 0, sizeof(struct qm_mcc_initfq));
	opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
	opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
			   QM_FQCTRL_PREFERINCACHE;
	opts->fqd.context_a.stashing.exclusive = 0;
	if (dpaa_svr_family != SVR_LS1046A_FAMILY)
		opts->fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
	opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
	opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
}

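/*
 * Set the MTU. The wire frame size is mtu + Ethernet header (14 bytes) +
 * CRC (4) + one VLAN tag (4), so the standard 1500-byte MTU maps to a
 * 1522-byte maximum frame.
 */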
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;
	uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
		return -EINVAL;
	/*
	 * Refuse an MTU that requires scattered Rx support when that
	 * feature has not been enabled beforehand.
	 */
	if (dev->data->min_rx_buf_size &&
		!dev->data->scattered_rx && frame_size > buffsz) {
		DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* check <seg size> * <max_seg>  >= max_frame */
	if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
		(frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
		DPAA_PMD_ERR("Frame too big to fit in max SG list (%d bytes)",
				buffsz * DPAA_SGT_MAX_ENTRIES);
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	fman_if_set_maxfrm(dev->process_private, frame_size);

	return 0;
}

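/*
 * Device configure: report the always-on offloads, program the FMan maximum
 * frame length for jumbo frames, enable scatter-gather Rx when requested,
 * and hook up the link-state-change interrupt if one was provided.
 */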
static int
dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_intr_handle *intr_handle;
	int ret;

	PMD_INIT_FUNC_TRACE();

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA_PMD_INFO(
		"Some Rx offloads are enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA_PMD_INFO(
		"Some Tx offloads are enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		DPAA_PMD_DEBUG("enabling jumbo");

		if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
		    DPAA_MAX_RX_PKT_LEN)
			max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
		else {
			DPAA_PMD_INFO("enabling jumbo: configured max len %d "
				"exceeds supported max %d, capping it",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				DPAA_MAX_RX_PKT_LEN);
			max_len = DPAA_MAX_RX_PKT_LEN;
		}

		fman_if_set_maxfrm(dev->process_private, max_len);
		dev->data->mtu = max_len
			- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
		DPAA_PMD_DEBUG("enabling scatter mode");
		fman_if_set_sg(dev->process_private, 1);
		dev->data->scattered_rx = 1;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && intr_handle->fd) {
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			rte_intr_callback_register(intr_handle,
					   dpaa_interrupt_handler,
					   (void *)dev);

		ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
		if (ret) {
			if (dev->data->dev_conf.intr_conf.lsc != 0) {
				rte_intr_callback_unregister(intr_handle,
					dpaa_interrupt_handler,
					(void *)dev);
				if (ret == EINVAL)
					printf("Failed to enable interrupt: Not Supported\n");
				else
					printf("Failed to enable interrupt\n");
			}
			dev->data->dev_conf.intr_conf.lsc = 0;
			dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
		}
	}
	return 0;
}

static const uint32_t *
dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_SCTP
	};

	PMD_INIT_FUNC_TRACE();

	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
		return ptypes;
	return NULL;
}

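/* LSC interrupt handler: drain the eventfd, refresh the cached link status
 * and fire the registered RTE_ETH_EVENT_INTR_LSC callbacks.
 */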
static void dpaa_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;
	uint64_t buf;
	int bytes_read;

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;

	bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
	if (bytes_read < 0)
		DPAA_PMD_ERR("Error reading eventfd\n");
	dpaa_eth_link_update(dev, 0);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Change tx callback to the real one */
	if (dpaa_intf->cgr_tx)
		dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
	else
		dev->tx_pkt_burst = dpaa_eth_queue_tx;

	fman_if_enable_rx(dev->process_private);

	return 0;
}

static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;

	PMD_INIT_FUNC_TRACE();

	fman_if_disable_rx(fif);
	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
}

static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;
	struct rte_device *rdev = dev->device;
	struct rte_dpaa_device *dpaa_dev;
	struct rte_intr_handle *intr_handle;

	PMD_INIT_FUNC_TRACE();

	dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
	intr_handle = &dpaa_dev->intr_handle;
	__fif = container_of(fif, struct __fman_if, __if);

	dpaa_eth_dev_stop(dev);

	if (intr_handle && intr_handle->fd &&
	    dev->data->dev_conf.intr_conf.lsc != 0) {
		dpaa_intr_disable(__fif->node_name);
		rte_intr_callback_unregister(intr_handle,
					     dpaa_interrupt_handler,
					     (void *)dev);
	}
}

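/* Report the firmware version as "SVR:<soc-version>-fman-v<ip-rev>", with
 * the SoC version read from the platform's SoC ID file.
 */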
static int
dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (!svr_file) {
		DPAA_PMD_ERR("Unable to open SoC device");
		return -ENOTSUP; /* Not supported on this infra */
	}
	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		dpaa_svr_family = svr_ver & SVR_MASK;
	else
		DPAA_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);

	ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
		       svr_ver, fman_ip_rev);
	ret += 1; /* add the size of '\0' */

	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;

	DPAA_PMD_DEBUG(": %s", dpaa_intf->name);

	dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
	dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
	dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;

	if (fif->mac_type == fman_mac_1g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G;
	} else if (fif->mac_type == fman_mac_2_5g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G
					| ETH_LINK_SPEED_2_5G;
	} else if (fif->mac_type == fman_mac_10g) {
		dev_info->speed_capa = ETH_LINK_SPEED_1G
					| ETH_LINK_SPEED_2_5G
					| ETH_LINK_SPEED_10G;
	} else {
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, fif->mac_type);
		return -EINVAL;
	}

	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
	dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
	dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;

	return 0;
}

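/* Note: the two burst-mode helpers below report only the first enabled
 * offload found in their map; mode->info is overwritten, not appended to.
 */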
static int
dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} rx_offload_map[] = {
			{DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
			{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
			{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
	};

	/* Update Rx offload info */
	for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
		if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				rx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int
dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
			__rte_unused uint16_t queue_id,
			struct rte_eth_burst_mode *mode)
{
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	int ret = -EINVAL;
	unsigned int i;
	const struct burst_info {
		uint64_t flags;
		const char *output;
	} tx_offload_map[] = {
			{DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
			{DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
			{DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
			{DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
			{DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
			{DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
			{DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
			{DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
	};

	/* Update Tx offload info */
	for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
		if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
			snprintf(mode->info, sizeof(mode->info), "%s",
				tx_offload_map[i].output);
			ret = 0;
			break;
		}
	}
	return ret;
}

static int dpaa_eth_link_update(struct rte_eth_dev *dev,
				int wait_to_complete __rte_unused)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_link *link = &dev->data->dev_link;
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (fif->mac_type == fman_mac_1g)
		link->link_speed = ETH_SPEED_NUM_1G;
	else if (fif->mac_type == fman_mac_2_5g)
		link->link_speed = ETH_SPEED_NUM_2_5G;
	else if (fif->mac_type == fman_mac_10g)
		link->link_speed = ETH_SPEED_NUM_10G;
	else
		DPAA_PMD_ERR("invalid link_speed: %s, %d",
			     dpaa_intf->name, fif->mac_type);

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
		ret = dpaa_get_link_status(__fif->node_name);
		if (ret < 0)
			return ret;
		link->link_status = ret;
	} else {
		link->link_status = dpaa_intf->valid;
	}

	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_autoneg = ETH_LINK_AUTONEG;

	DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
		      link->link_status ? "Up" : "Down");
	return 0;
}

static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_stats_get(dev->process_private, stats);
	return 0;
}

static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_stats_reset(dev->process_private);

	return 0;
}

static int
dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		    unsigned int n)
{
	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
	uint64_t values[sizeof(struct dpaa_if_stats) / 8];

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	fman_if_stats_get_all(dev->process_private, values,
			      sizeof(struct dpaa_if_stats) / 8);

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
	}
	return i;
}

static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			strlcpy(xstats_names[i].name,
				dpaa_xstats_strings[i].name,
				sizeof(xstats_names[i].name));

	return stat_cnt;
}

static int
dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		      uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];

	if (!ids) {
		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		fman_if_stats_get_all(dev->process_private, values_copy,
				      sizeof(struct dpaa_if_stats) / 8);

		for (i = 0; i < stat_cnt; i++)
			values[i] =
				values_copy[dpaa_xstats_strings[i].offset / 8];

		return stat_cnt;
	}

	dpaa_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa_xstats_get_names(dev, xstats_names, limit);

	dpaa_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA_PMD_ERR("id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_enable(dev->process_private);

	return 0;
}

static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_promiscuous_disable(dev->process_private);

	return 0;
}

static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_set_mcast_filter_table(dev->process_private);

	return 0;
}

static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_reset_mcast_filter_table(dev->process_private);

	return 0;
}

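/*
 * Rx queue setup: validate the queue index and mempool, program the FMan
 * buffer pool and frame descriptor offset on first use, and, while dedicated
 * portals remain available, switch the queue to push mode with its own
 * channel and an eventfd-backed interrupt vector entry.
 */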
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id __rte_unused,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct fman_if *fif = dev->process_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
	struct qm_mcc_initfq opts = {0};
	u32 flags = 0;
	int ret;
	u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;

	PMD_INIT_FUNC_TRACE();

	if (queue_idx >= dev->data->nb_rx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, queue_idx, dev->data->nb_rx_queues);
		return -rte_errno;
	}

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
		return -EINVAL;
	}

	DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
			queue_idx, rxq->fqid);

	/* Max packet can fit in single buffer */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
		;
	} else if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_SCATTER) {
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			buffsz * DPAA_SGT_MAX_ENTRIES) {
			DPAA_PMD_ERR("max RxPkt size %d too big to fit "
				"MaxSGlist %d",
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				buffsz * DPAA_SGT_MAX_ENTRIES);
			rte_errno = EOVERFLOW;
			return -rte_errno;
		}
	} else {
		/* buffsz already excludes the mbuf headroom */
		DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
		     " larger than a single mbuf (%u) and scattered"
		     " mode has not been requested",
		     dev->data->dev_conf.rxmode.max_rx_pkt_len,
		     buffsz);
	}

	if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
		struct fman_if_ic_params icp;
		uint32_t fd_offset;
		uint32_t bp_size;

		if (!mp->pool_data) {
			DPAA_PMD_ERR("Not an offloaded buffer pool!");
			return -1;
		}
		dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

		memset(&icp, 0, sizeof(icp));
		/* set ICEOF to the default value, which is 0 */
		icp.iciof = DEFAULT_ICIOF;
		icp.iceof = DEFAULT_RX_ICEOF;
		icp.icsz = DEFAULT_ICSZ;
		fman_if_set_ic_params(fif, &icp);

		fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
		fman_if_set_fdoff(fif, fd_offset);

		/* Buffer pool size should be equal to the dataroom size */
		bp_size = rte_pktmbuf_data_room_size(mp);
		fman_if_set_bp(fif, mp->size,
			       dpaa_intf->bp_info->bpid, bp_size);
		dpaa_intf->valid = 1;
		DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
				dpaa_intf->name, fd_offset,
				fman_if_get_fdoff(fif));
	}
	DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
		fman_if_get_sg_enable(fif),
		dev->data->dev_conf.rxmode.max_rx_pkt_len);
	/* checking if push mode only, no error check for now */
	if (!rxq->is_static &&
	    dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
		struct qman_portal *qp;
		int q_fd;

		dpaa_push_queue_idx++;
		opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
		opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
				   QM_FQCTRL_CTXASTASHING |
				   QM_FQCTRL_PREFERINCACHE;
		opts.fqd.context_a.stashing.exclusive = 0;
		/* In a multicore scenario stashing becomes a bottleneck on
		 * LS1046, so do not enable stashing in this case.
		 */
		if (dpaa_svr_family != SVR_LS1046A_FAMILY)
			opts.fqd.context_a.stashing.annotation_cl =
						DPAA_IF_RX_ANNOTATION_STASH;
		opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
		opts.fqd.context_a.stashing.context_cl =
						DPAA_IF_RX_CONTEXT_STASH;

		/* Create a channel and associate the given queue with it */
		qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
		opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
		opts.fqd.dest.channel = rxq->ch_id;
		opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
		flags = QMAN_INITFQ_FLAG_SCHED;

		/* Configure tail drop */
		if (dpaa_intf->cgr_rx) {
			opts.we_mask |= QM_INITFQ_WE_CGID;
			opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
			opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		}
		ret = qman_init_fq(rxq, flags, &opts);
		if (ret) {
			DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
			return ret;
		}
		if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
		} else {
			rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
			rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
		}

		rxq->is_static = true;

		/* Allocate qman specific portals */
		qp = fsl_qman_fq_portal_create(&q_fd);
		if (!qp) {
			DPAA_PMD_ERR("Unable to alloc fq portal");
			return -1;
		}
		rxq->qp = qp;

		/* Set up the device interrupt handler */
		if (!dev->intr_handle) {
			struct rte_dpaa_device *dpaa_dev;
			struct rte_device *rdev = dev->device;

			dpaa_dev = container_of(rdev, struct rte_dpaa_device,
						device);
			dev->intr_handle = &dpaa_dev->intr_handle;
			dev->intr_handle->intr_vec = rte_zmalloc(NULL,
					dpaa_push_mode_max_queue, 0);
			if (!dev->intr_handle->intr_vec) {
				DPAA_PMD_ERR("intr_vec alloc failed");
				return -ENOMEM;
			}
			dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
			dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
		}

		dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
		dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
		dev->intr_handle->efds[queue_idx] = q_fd;
		rxq->q_fd = q_fd;
	}
	rxq->bp_array = rte_dpaa_bpid_info;
	dev->data->rx_queues[queue_idx] = rxq;

	/* configure the CGR size as per the desc size */
	if (dpaa_intf->cgr_rx) {
		struct qm_mcc_initcgr cgr_opts = {0};

		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
		ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop modify fail on fqid %d (ret=%d)",
				rxq->fqid, ret);
		}
	}

	return 0;
}

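/*
 * Attach an Rx queue to an eventdev channel: the FQ is re-initialised with
 * the channel and priority from the adapter configuration, and the dequeue
 * callback is chosen according to the event scheduling type.
 */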
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
	struct qm_mcc_initfq opts = {0};

	if (dpaa_push_mode_max_queue)
		DPAA_PMD_WARN("PUSH mode queues and EVENTDEV are not compatible\n"
			      "PUSH mode already enabled for first %d queues.\n"
			      "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
			      dpaa_push_mode_max_queue);

	dpaa_poll_queue_default_config(&opts);

	switch (queue_conf->ev.sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset the FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * with the HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
		return -1;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
		break;
	}

	opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
	opts.fqd.dest.channel = ch_id;
	opts.fqd.dest.wq = queue_conf->ev.priority;

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	flags = QMAN_INITFQ_FLAG_SCHED;

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
				"ret:%d(%s)", rxq->fqid, ret, strerror(ret));
		return ret;
	}

	/* copy configuration which needs to be filled during dequeue */
	memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
	dev->data->rx_queues[eth_rx_queue_id] = rxq;

	return ret;
}

int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id)
{
	struct qm_mcc_initfq opts;
	int ret;
	u32 flags = 0;
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];

	dpaa_poll_queue_default_config(&opts);

	if (dpaa_intf->cgr_rx) {
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}

	ret = qman_init_fq(rxq, flags, &opts);
	if (ret) {
		DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
			     rxq->fqid, ret);
	}

	rxq->cb.dqrr_dpdk_cb = NULL;
	dev->data->rx_queues[eth_rx_queue_id] = NULL;

	return 0;
}

static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			    uint16_t nb_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
		return -EINVAL;
	}
	if (queue_idx >= dev->data->nb_tx_queues) {
		rte_errno = EOVERFLOW;
		DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, queue_idx, dev->data->nb_tx_queues);
		return -rte_errno;
	}

	DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
			queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
	dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];

	return 0;
}

static void dpaa_eth_tx_queue_release(void *txq __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
	u32 frm_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
		DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
			       rx_queue_id, frm_cnt);
	}
	return frm_cnt;
}

static int dpaa_link_down(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;

	PMD_INIT_FUNC_TRACE();

	__fif = container_of(fif, struct __fman_if, __if);

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
	else
		dpaa_eth_dev_stop(dev);
	return 0;
}

static int dpaa_link_up(struct rte_eth_dev *dev)
{
	struct fman_if *fif = dev->process_private;
	struct __fman_if *__fif;

	PMD_INIT_FUNC_TRACE();

	__fif = container_of(fif, struct __fman_if, __if);

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
	else
		dpaa_eth_dev_start(dev);
	return 0;
}

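/* Configure Tx pause frames in the FMan and cache the settings so that
 * dpaa_flow_ctrl_get() can report them back without touching hardware.
 */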
static int
dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	net_fc = dpaa_intf->fc_conf;

	if (fc_conf->high_water < fc_conf->low_water) {
		DPAA_PMD_ERR("Incorrect Flow Control Configuration");
		return -EINVAL;
	}

	if (fc_conf->mode == RTE_FC_NONE) {
		return 0;
	} else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
		 fc_conf->mode == RTE_FC_FULL) {
		fman_if_set_fc_threshold(dev->process_private,
					 fc_conf->high_water,
					 fc_conf->low_water,
					 dpaa_intf->bp_info->bpid);
		if (fc_conf->pause_time)
			fman_if_set_fc_quanta(dev->process_private,
					      fc_conf->pause_time);
	}

	/* Save the information in dpaa device */
	net_fc->pause_time = fc_conf->pause_time;
	net_fc->high_water = fc_conf->high_water;
	net_fc->low_water = fc_conf->low_water;
	net_fc->send_xon = fc_conf->send_xon;
	net_fc->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd;
	net_fc->mode = fc_conf->mode;
	net_fc->autoneg = fc_conf->autoneg;

	return 0;
}

static int
dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
		   struct rte_eth_fc_conf *fc_conf)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct rte_eth_fc_conf *net_fc = dpaa_intf->fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (net_fc) {
		fc_conf->pause_time = net_fc->pause_time;
		fc_conf->high_water = net_fc->high_water;
		fc_conf->low_water = net_fc->low_water;
		fc_conf->send_xon = net_fc->send_xon;
		fc_conf->mac_ctrl_frame_fwd = net_fc->mac_ctrl_frame_fwd;
		fc_conf->mode = net_fc->mode;
		fc_conf->autoneg = net_fc->autoneg;
		return 0;
	}
	ret = fman_if_get_fc_threshold(dev->process_private);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time =
			fman_if_get_fc_quanta(dev->process_private);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

static int
dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
			     struct rte_ether_addr *addr,
			     uint32_t index,
			     __rte_unused uint32_t pool)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dev->process_private,
				   addr->addr_bytes, index);

	if (ret)
		DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	PMD_INIT_FUNC_TRACE();

	fman_if_clear_mac_addr(dev->process_private, index);
}

static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct rte_ether_addr *addr)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = fman_if_add_mac_addr(dev->process_private, addr->addr_bytes, 0);
	if (ret)
		DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);

	return ret;
}

static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
				      uint16_t queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];

	if (!rxq->is_static)
		return -EINVAL;

	return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
}

static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
				       uint16_t queue_id)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
	uint32_t temp;
	ssize_t temp1;

	if (!rxq->is_static)
		return -EINVAL;

	qman_fq_portal_irqsource_remove(rxq->qp, ~0);

	temp1 = read(rxq->q_fd, &temp, sizeof(temp));
	if (temp1 != sizeof(temp))
		DPAA_PMD_ERR("irq read error");

	qman_fq_portal_thread_irq(rxq->qp);

	return 0;
}

static struct eth_dev_ops dpaa_devops = {
	.dev_configure            = dpaa_eth_dev_configure,
	.dev_start                = dpaa_eth_dev_start,
	.dev_stop                 = dpaa_eth_dev_stop,
	.dev_close                = dpaa_eth_dev_close,
	.dev_infos_get            = dpaa_eth_dev_info,
	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,

	.rx_queue_setup           = dpaa_eth_rx_queue_setup,
	.tx_queue_setup           = dpaa_eth_tx_queue_setup,
	.rx_queue_release         = dpaa_eth_rx_queue_release,
	.tx_queue_release         = dpaa_eth_tx_queue_release,
	.rx_queue_count           = dpaa_dev_rx_queue_count,
	.rx_burst_mode_get        = dpaa_dev_rx_burst_mode_get,
	.tx_burst_mode_get        = dpaa_dev_tx_burst_mode_get,
	.flow_ctrl_get            = dpaa_flow_ctrl_get,
	.flow_ctrl_set            = dpaa_flow_ctrl_set,

	.link_update              = dpaa_eth_link_update,
	.stats_get                = dpaa_eth_stats_get,
	.xstats_get               = dpaa_dev_xstats_get,
	.xstats_get_by_id         = dpaa_xstats_get_by_id,
	.xstats_get_names_by_id   = dpaa_xstats_get_names_by_id,
	.xstats_get_names         = dpaa_xstats_get_names,
	.xstats_reset             = dpaa_eth_stats_reset,
	.stats_reset              = dpaa_eth_stats_reset,
	.promiscuous_enable       = dpaa_eth_promiscuous_enable,
	.promiscuous_disable      = dpaa_eth_promiscuous_disable,
	.allmulticast_enable      = dpaa_eth_multicast_enable,
	.allmulticast_disable     = dpaa_eth_multicast_disable,
	.mtu_set                  = dpaa_mtu_set,
	.dev_set_link_down        = dpaa_link_down,
	.dev_set_link_up          = dpaa_link_up,
	.mac_addr_add             = dpaa_dev_add_mac_addr,
	.mac_addr_remove          = dpaa_dev_remove_mac_addr,
	.mac_addr_set             = dpaa_dev_set_mac_addr,

	.fw_version_get           = dpaa_fw_version_get,

	.rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
	.rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
};

static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
{
	if (strcmp(dev->device->driver->name,
		   drv->driver.name))
		return false;

	return true;
}

static bool
is_dpaa_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_dpaa_pmd);
}

int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];

	if (!is_dpaa_supported(dev))
		return -ENOTSUP;

	if (on)
		fman_if_loopback_enable(dev->process_private);
	else
		fman_if_loopback_disable(dev->process_private);

	return 0;
}

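/*
 * Illustrative usage from an application (a sketch; the port_id variable is
 * the caller's own):
 *
 *	#include <rte_pmd_dpaa.h>
 *
 *	if (rte_pmd_dpaa_set_tx_loopback(port_id, 1) < 0)
 *		printf("loopback not supported on port %u\n", port_id);
 */
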
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
			       struct fman_if *fman_intf)
{
	struct rte_eth_fc_conf *fc_conf;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (!(dpaa_intf->fc_conf)) {
		dpaa_intf->fc_conf = rte_zmalloc(NULL,
			sizeof(struct rte_eth_fc_conf), MAX_CACHELINE);
		if (!dpaa_intf->fc_conf) {
			DPAA_PMD_ERR("unable to save flow control info");
			return -ENOMEM;
		}
	}
	fc_conf = dpaa_intf->fc_conf;
	ret = fman_if_get_fc_threshold(fman_intf);
	if (ret) {
		fc_conf->mode = RTE_FC_TX_PAUSE;
		fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
	} else {
		fc_conf->mode = RTE_FC_NONE;
	}

	return 0;
}

/* Initialise an Rx FQ */
static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
			      uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
	struct qm_mcc_initcgr cgr_opts = {
		.we_mask = QM_CGR_WE_CS_THRES |
				QM_CGR_WE_CSTD_EN |
				QM_CGR_WE_MODE,
		.cgr = {
			.cstd_en = QM_CGR_EN,
			.mode = QMAN_CGR_MODE_FRAME
		}
	};

	if (fqid) {
		ret = qman_reserve_fqid(fqid);
		if (ret) {
			DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
				     fqid, ret);
			return -EINVAL;
		}
	} else {
		flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
	}
	DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
	ret = qman_create_fq(fqid, flags, fq);
	if (ret) {
		DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
			fqid, ret);
		return ret;
	}
	fq->is_static = false;

	dpaa_poll_queue_default_config(&opts);

	if (cgr_rx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, td_threshold, 0);
		cgr_rx->cb = NULL;
		ret = qman_create_cgr(cgr_rx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"rx taildrop init fail on rx fqid 0x%x(ret=%d)",
				fq->fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_rx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
	}
without_cgr:
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
	return ret;
}

1418 /* Initialise a Tx FQ */
1419 static int dpaa_tx_queue_init(struct qman_fq *fq,
1420                               struct fman_if *fman_intf,
1421                               struct qman_cgr *cgr_tx)
1422 {
1423         struct qm_mcc_initfq opts = {0};
1424         struct qm_mcc_initcgr cgr_opts = {
1425                 .we_mask = QM_CGR_WE_CS_THRES |
1426                                 QM_CGR_WE_CSTD_EN |
1427                                 QM_CGR_WE_MODE,
1428                 .cgr = {
1429                         .cstd_en = QM_CGR_EN,
1430                         .mode = QMAN_CGR_MODE_FRAME
1431                 }
1432         };
1433         int ret;
1434
	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
			     QMAN_FQ_FLAG_TO_DCPORTAL, fq);
	if (ret) {
		DPAA_PMD_ERR("create tx fq failed with ret: %d", ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA;
	opts.fqd.dest.channel = fman_intf->tx_channel_id;
	opts.fqd.dest.wq = DPAA_IF_TX_PRIORITY;
	opts.fqd.fq_ctrl = QM_FQCTRL_PREFERINCACHE;
	opts.fqd.context_b = 0;
	/* no tx-confirmation */
	opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
	opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
	DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);

	if (cgr_tx) {
		/* Enable tail drop with cgr on this queue */
		qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
				      td_tx_threshold, 0);
		cgr_tx->cb = NULL;
		ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
				      &cgr_opts);
		if (ret) {
			DPAA_PMD_WARN(
				"tx taildrop init fail on tx fqid 0x%x (ret=%d)",
				fq->fqid, ret);
			goto without_cgr;
		}
		opts.we_mask |= QM_INITFQ_WE_CGID;
		opts.fqd.cgid = cgr_tx->cgrid;
		opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
		DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
				td_tx_threshold);
	}
without_cgr:
	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret)
		DPAA_PMD_ERR("init tx fqid 0x%x failed with ret: %d",
			     fq->fqid, ret);
	return ret;
}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
/* Initialise a DEBUG FQ ([rt]x_error, rx_default). */
static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
{
	struct qm_mcc_initfq opts = {0};
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = qman_reserve_fqid(fqid);
	if (ret) {
		DPAA_PMD_ERR("Reserve debug fqid %d failed with ret: %d",
			fqid, ret);
		return -EINVAL;
	}
	/* "map" this Rx FQ to one of the interface's Tx FQIDs */
	DPAA_PMD_DEBUG("Creating debug fq %p, fqid %d", fq, fqid);
	ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
	if (ret) {
		DPAA_PMD_ERR("create debug fqid %d failed with ret: %d",
			fqid, ret);
		return ret;
	}
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL;
	opts.fqd.dest.wq = DPAA_IF_DEBUG_PRIORITY;
	ret = qman_init_fq(fq, 0, &opts);
	if (ret)
		DPAA_PMD_ERR("init debug fqid %d failed with ret: %d",
			    fqid, ret);
	return ret;
}
#endif

/* Initialise a network interface (secondary process) */
static int
dpaa_dev_init_secondary(struct rte_eth_dev *eth_dev)
{
	struct rte_dpaa_device *dpaa_device;
	struct fm_eth_port_cfg *cfg;
	struct dpaa_if *dpaa_intf;
	struct fman_if *fman_intf;
	int dev_id;

	PMD_INIT_FUNC_TRACE();

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	cfg = dpaa_get_eth_port_cfg(dev_id);
	fman_intf = cfg->fman_if;
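	/* process_private is per-process storage, so the secondary process
	 * keeps its own fman_if pointer rather than sharing it via the
	 * hugepage-backed device data.
	 */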
	eth_dev->process_private = fman_intf;

	/* Plugging in the UCODE burst API is not supported in secondary */
	dpaa_intf = eth_dev->data->dev_private;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	if (dpaa_intf->cgr_tx)
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
	else
		eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
	qman_set_fq_lookup_table(
		dpaa_intf->rx_queues->qman_fq_lookup_table);
#endif

	return 0;
}

/* Initialise a network interface */
static int
dpaa_dev_init(struct rte_eth_dev *eth_dev)
{
	int num_rx_fqs, fqid;
	int loop, ret = 0;
	int dev_id;
	struct rte_dpaa_device *dpaa_device;
	struct dpaa_if *dpaa_intf;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;
	struct fman_if_bpool *bp, *tmp_bp;
	uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
	uint32_t cgrid_tx[MAX_DPAA_CORES];
	char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];

	PMD_INIT_FUNC_TRACE();

	dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
	dev_id = dpaa_device->id.dev_id;
	dpaa_intf = eth_dev->data->dev_private;
	cfg = dpaa_get_eth_port_cfg(dev_id);
	fman_intf = cfg->fman_if;

	dpaa_intf->name = dpaa_device->name;

	/* save fman_if & cfg in the interface structure */
	eth_dev->process_private = fman_intf;
	dpaa_intf->ifid = dev_id;
	dpaa_intf->cfg = cfg;

	/* Initialize Rx FQs */
	if (default_q) {
		num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	} else {
		if (getenv("DPAA_NUM_RX_QUEUES"))
			num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
		else
			num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
	}
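	/* Example (hypothetical value): export DPAA_NUM_RX_QUEUES=4 in the
	 * environment before starting the application to override the
	 * default Rx queue count.
	 */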

	/* A device cannot have more than DPAA_MAX_NUM_PCD_QUEUES Rx
	 * queues.
	 */
	if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
		DPAA_PMD_ERR("Invalid number of RX queues\n");
		return -EINVAL;
	}

	dpaa_intf->rx_queues = rte_zmalloc(NULL,
		sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
	if (!dpaa_intf->rx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
		return -ENOMEM;
	}

	memset(cgrid, 0, sizeof(cgrid));
	memset(cgrid_tx, 0, sizeof(cgrid_tx));

	/* If DPAA_TX_TAILDROP_THRESHOLD is set, use that value; 0 means
	 * Tx tail drop is disabled.
	 */
	if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
		td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
		DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
			       td_tx_threshold);
		/* Clamp overly large values back to the default threshold */
		if (td_tx_threshold > UINT16_MAX)
			td_tx_threshold = CGR_RX_PERFQ_THRESH;
	}
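	/* Example (hypothetical value): export DPAA_TX_TAILDROP_THRESHOLD=512
	 * enables Tx tail drop at 512 frames per Tx FQ.
	 */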

	/* If congestion control is enabled globally */
	if (td_threshold) {
		dpaa_intf->cgr_rx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
		if (!dpaa_intf->cgr_rx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
		if (ret != num_rx_fqs) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_rx = NULL;
	}

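	/* In default mode all traffic lands on the single default Rx FQ;
	 * with FMC-created PCD queues, per-port FQIDs are derived from
	 * DPAA_PCD_FQID_START plus a per-MAC offset.
	 */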
	for (loop = 0; loop < num_rx_fqs; loop++) {
		if (default_q)
			fqid = cfg->rx_def;
		else
			fqid = DPAA_PCD_FQID_START + fman_intf->mac_idx *
				DPAA_PCD_FQID_MULTIPLIER + loop;

		if (dpaa_intf->cgr_rx)
			dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];

		ret = dpaa_rx_queue_init(&dpaa_intf->rx_queues[loop],
			dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
			fqid);
		if (ret)
			goto free_rx;
		dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_rx_queues = num_rx_fqs;

	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
	dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
		MAX_DPAA_CORES, MAX_CACHELINE);
	if (!dpaa_intf->tx_queues) {
		DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
		ret = -ENOMEM;
		goto free_rx;
	}

	/* If congestion control is enabled globally */
	if (td_tx_threshold) {
		dpaa_intf->cgr_tx = rte_zmalloc(NULL,
			sizeof(struct qman_cgr) * MAX_DPAA_CORES,
			MAX_CACHELINE);
		if (!dpaa_intf->cgr_tx) {
			DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
			ret = -ENOMEM;
			goto free_rx;
		}

		ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
					     1, 0);
		if (ret != MAX_DPAA_CORES) {
			DPAA_PMD_WARN("insufficient CGRIDs available");
			ret = -EINVAL;
			goto free_rx;
		}
	} else {
		dpaa_intf->cgr_tx = NULL;
	}

	for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
		if (dpaa_intf->cgr_tx)
			dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];

		ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
			fman_intf,
			dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
		if (ret)
			goto free_tx;
		dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
	}
	dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
	dpaa_debug_queue_init(&dpaa_intf->debug_queues[
		DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
	dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif

	DPAA_PMD_DEBUG("All frame queues created");

	/* Get the initial configuration for flow control */
	dpaa_fc_set_default(dpaa_intf, fman_intf);

	/* Reset the bpool list; bpools are attached dynamically later */
	list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
		list_del(&bp->node);
		rte_free(bp);
	}

	/* Populate ethdev structure */
	eth_dev->dev_ops = &dpaa_devops;
	eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
	eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
		RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
						"store MAC addresses",
				RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
		ret = -ENOMEM;
		goto free_tx;
	}

	/* Copy the primary MAC address */
	rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
	rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);

	DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);

	/* Disable Rx and discard errored frames */
	fman_if_discard_rx_errors(fman_intf);
	fman_if_disable_rx(fman_intf);
	/* Disable promiscuous mode */
	fman_if_promiscuous_disable(fman_intf);
	/* Disable multicast */
	fman_if_reset_mcast_filter_table(fman_intf);
	/* Reset interface statistics */
	fman_if_stats_reset(fman_intf);
	/* Disable SG by default */
	fman_if_set_sg(fman_intf, 0);
	fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);

	return 0;

free_tx:
	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;
	dpaa_intf->nb_tx_queues = 0;

free_rx:
	rte_free(dpaa_intf->cgr_rx);
	rte_free(dpaa_intf->cgr_tx);
	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;
	dpaa_intf->nb_rx_queues = 0;
	return ret;
}

static int
dpaa_dev_uninit(struct rte_eth_dev *dev)
{
	struct dpaa_if *dpaa_intf = dev->data->dev_private;
	int loop;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpaa_intf) {
		DPAA_PMD_WARN("Already closed or not started");
		return -1;
	}

	dpaa_eth_dev_close(dev);

	/* Release configuration memory */
	if (dpaa_intf->fc_conf)
		rte_free(dpaa_intf->fc_conf);

	/* Release Rx congestion groups */
	if (dpaa_intf->cgr_rx) {
		for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);

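		/* The CGRIDs were allocated as one contiguous range, so
		 * release the range starting from the first CGRID.
		 */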
		qman_release_cgrid_range(dpaa_intf->cgr_rx[0].cgrid,
					 dpaa_intf->nb_rx_queues);
	}

	rte_free(dpaa_intf->cgr_rx);
	dpaa_intf->cgr_rx = NULL;

	/* Release Tx congestion groups */
	if (dpaa_intf->cgr_tx) {
		for (loop = 0; loop < MAX_DPAA_CORES; loop++)
			qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);

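		/* As on the Rx side, release the contiguous Tx CGRID range
		 * starting from the first CGRID.
		 */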
		qman_release_cgrid_range(dpaa_intf->cgr_tx[0].cgrid,
					 MAX_DPAA_CORES);
		rte_free(dpaa_intf->cgr_tx);
		dpaa_intf->cgr_tx = NULL;
	}

	rte_free(dpaa_intf->rx_queues);
	dpaa_intf->rx_queues = NULL;

	rte_free(dpaa_intf->tx_queues);
	dpaa_intf->tx_queues = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	return 0;
}

static int
rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
	       struct rte_dpaa_device *dpaa_dev)
{
	int diag;
	int ret;
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

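	/* DPAA hardware prepends annotation data (DPAA_MBUF_HW_ANNOTATION +
	 * DPAA_FD_PTA_SIZE bytes) into the buffer headroom, so the mbuf
	 * headroom must be large enough to hold it.
	 */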
	if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
		RTE_PKTMBUF_HEADROOM) {
		DPAA_PMD_ERR(
		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
		RTE_PKTMBUF_HEADROOM,
		DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);

		return -1;
	}

	/* In case of a secondary process, the device is already configured
	 * and no further action is required, apart from portal initialization
	 * and attaching to the port by name.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
		if (!eth_dev)
			return -ENOMEM;
		eth_dev->device = &dpaa_dev->device;
		eth_dev->dev_ops = &dpaa_devops;

		ret = dpaa_dev_init_secondary(eth_dev);
		if (ret != 0) {
			RTE_LOG(ERR, PMD, "secondary dev init failed\n");
			return ret;
		}

		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
		if (access("/tmp/fmc.bin", F_OK) == -1) {
			DPAA_PMD_INFO("* FMC not configured. Enabling default mode");
			default_q = 1;
		}

		/* Disable the default push mode on LS1043 */
		if (dpaa_svr_family == SVR_LS1043A_FAMILY)
			dpaa_push_mode_max_queue = 0;

		/* If push mode queues are to be enabled: currently we allow
		 * only one queue per thread.
		 */
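		/* Example (hypothetical value): export DPAA_PUSH_QUEUES_NUMBER=2
		 * before launch; values above DPAA_MAX_PUSH_MODE_QUEUE are
		 * clamped below.
		 */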
		if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
			dpaa_push_mode_max_queue =
					atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
			if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
				dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
		}

		is_global_init = 1;
	}

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)1);
		if (ret) {
			DPAA_PMD_ERR("Unable to initialize portal");
			return ret;
		}
	}

	eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
	if (!eth_dev)
		return -ENOMEM;

	eth_dev->data->dev_private =
			rte_zmalloc("ethdev private structure",
					sizeof(struct dpaa_if),
					RTE_CACHE_LINE_SIZE);
	if (!eth_dev->data->dev_private) {
		DPAA_PMD_ERR("Cannot allocate memzone for port data");
		rte_eth_dev_release_port(eth_dev);
		return -ENOMEM;
	}

	eth_dev->device = &dpaa_dev->device;
	dpaa_dev->eth_dev = eth_dev;

	qman_ern_register_cb(dpaa_free_mbuf);

	if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

	/* Invoke PMD device initialization function */
	diag = dpaa_dev_init(eth_dev);
	if (diag == 0) {
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	rte_eth_dev_release_port(eth_dev);
	return diag;
}

static int
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_eth_dev *eth_dev;

	PMD_INIT_FUNC_TRACE();

	eth_dev = dpaa_dev->eth_dev;
	dpaa_dev_uninit(eth_dev);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_dpaa_driver rte_dpaa_pmd = {
	.drv_flags = RTE_DPAA_DRV_INTR_LSC,
	.drv_type = FSL_DPAA_ETH,
	.probe = rte_dpaa_probe,
	.remove = rte_dpaa_remove,
};

RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);