net/dpaa2: add CGR counters in xstats
dpdk.git: drivers/net/dpaa2/dpaa2_ethdev.c
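This commit exposes the DPNI congestion group (CGR) reject counters ("cgr_reject_frames" and "cgr_reject_bytes", statistics page 4) through the ethdev xstats interface. As a minimal illustrative sketch, not part of the driver file below and assuming an already started dpaa2 port whose id is passed in, the new counters can be read with the generic xstats API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Print the CGR reject counters of a dpaa2 port (illustrative sketch). */
static void
print_cgr_xstats(uint16_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *stats = NULL;
        int i, cnt;

        /* First call with NULL returns the number of available xstats. */
        cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (cnt <= 0)
                return;

        names = calloc(cnt, sizeof(*names));
        stats = calloc(cnt, sizeof(*stats));
        if (names == NULL || stats == NULL)
                goto out;

        if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
            rte_eth_xstats_get(port_id, stats, cnt) != cnt)
                goto out;

        for (i = 0; i < cnt; i++) {
                /* stats[i].id indexes the names array. */
                if (strncmp(names[stats[i].id].name, "cgr_reject", 10) == 0)
                        printf("%s: %" PRIu64 "\n",
                               names[stats[i].id].name, stats[i].value);
        }
out:
        free(names);
        free(stats);
}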
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
21
22 #include "dpaa2_pmd_logs.h"
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <mc/fsl_dpmng.h>
28 #include "dpaa2_ethdev.h"
29 #include <fsl_qbman_debug.h>
30
31 #define DRIVER_LOOPBACK_MODE "drv_loopback"
32 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
33
34 /* Supported Rx offloads */
35 static uint64_t dev_rx_offloads_sup =
36                 DEV_RX_OFFLOAD_CHECKSUM |
37                 DEV_RX_OFFLOAD_SCTP_CKSUM |
38                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
39                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
40                 DEV_RX_OFFLOAD_VLAN_STRIP |
41                 DEV_RX_OFFLOAD_VLAN_FILTER |
42                 DEV_RX_OFFLOAD_JUMBO_FRAME |
43                 DEV_RX_OFFLOAD_TIMESTAMP;
44
45 /* Rx offloads which cannot be disabled */
46 static uint64_t dev_rx_offloads_nodis =
47                 DEV_RX_OFFLOAD_SCATTER;
48
49 /* Supported Tx offloads */
50 static uint64_t dev_tx_offloads_sup =
51                 DEV_TX_OFFLOAD_VLAN_INSERT |
52                 DEV_TX_OFFLOAD_IPV4_CKSUM |
53                 DEV_TX_OFFLOAD_UDP_CKSUM |
54                 DEV_TX_OFFLOAD_TCP_CKSUM |
55                 DEV_TX_OFFLOAD_SCTP_CKSUM |
56                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
57                 DEV_TX_OFFLOAD_MT_LOCKFREE |
58                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
59
60 /* Tx offloads which cannot be disabled */
61 static uint64_t dev_tx_offloads_nodis =
62                 DEV_TX_OFFLOAD_MULTI_SEGS;
63
64 /* enable timestamp in mbuf */
65 enum pmd_dpaa2_ts dpaa2_enable_ts;
66
67 struct rte_dpaa2_xstats_name_off {
68         char name[RTE_ETH_XSTATS_NAME_SIZE];
69         uint8_t page_id; /* dpni statistics page id */
70         uint8_t stats_id; /* stats id in the given page */
71 };
72
73 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
74         {"ingress_multicast_frames", 0, 2},
75         {"ingress_multicast_bytes", 0, 3},
76         {"ingress_broadcast_frames", 0, 4},
77         {"ingress_broadcast_bytes", 0, 5},
78         {"egress_multicast_frames", 1, 2},
79         {"egress_multicast_bytes", 1, 3},
80         {"egress_broadcast_frames", 1, 4},
81         {"egress_broadcast_bytes", 1, 5},
82         {"ingress_filtered_frames", 2, 0},
83         {"ingress_discarded_frames", 2, 1},
84         {"ingress_nobuffer_discards", 2, 2},
85         {"egress_discarded_frames", 2, 3},
86         {"egress_confirmed_frames", 2, 4},
87         {"cgr_reject_frames", 4, 0},
88         {"cgr_reject_bytes", 4, 1},
89 };
90
91 static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
92         RTE_ETH_FILTER_ADD,
93         RTE_ETH_FILTER_DELETE,
94         RTE_ETH_FILTER_UPDATE,
95         RTE_ETH_FILTER_FLUSH,
96         RTE_ETH_FILTER_GET
97 };
98
99 static struct rte_dpaa2_driver rte_dpaa2_pmd;
100 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
101 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
102                                  int wait_to_complete);
103 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
104 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
105 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
106
107 int dpaa2_logtype_pmd;
108
109 void
110 rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
111 {
112         dpaa2_enable_ts = enable;
113 }
114
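/* Add or remove a single VLAN id in the dpni hardware VLAN filter table. */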
115 static int
116 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
117 {
118         int ret;
119         struct dpaa2_dev_priv *priv = dev->data->dev_private;
120         struct fsl_mc_io *dpni = priv->hw;
121
122         PMD_INIT_FUNC_TRACE();
123
124         if (dpni == NULL) {
125                 DPAA2_PMD_ERR("dpni is NULL");
126                 return -1;
127         }
128
129         if (on)
130                 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
131                                        priv->token, vlan_id);
132         else
133                 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
134                                           priv->token, vlan_id);
135
136         if (ret < 0)
137                 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
138                               ret, vlan_id, priv->hw_id);
139
140         return ret;
141 }
142
143 static int
144 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
145 {
146         struct dpaa2_dev_priv *priv = dev->data->dev_private;
147         struct fsl_mc_io *dpni = priv->hw;
148         int ret;
149
150         PMD_INIT_FUNC_TRACE();
151
152         if (mask & ETH_VLAN_FILTER_MASK) {
153                 /* VLAN filter not available */
154                 if (!priv->max_vlan_filters) {
155                         DPAA2_PMD_INFO("VLAN filter not available");
156                         goto next_mask;
157                 }
158
159                 if (dev->data->dev_conf.rxmode.offloads &
160                         DEV_RX_OFFLOAD_VLAN_FILTER)
161                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
162                                                       priv->token, true);
163                 else
164                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
165                                                       priv->token, false);
166                 if (ret < 0)
167                         DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
168         }
169 next_mask:
170         if (mask & ETH_VLAN_EXTEND_MASK) {
171                 if (dev->data->dev_conf.rxmode.offloads &
172                         DEV_RX_OFFLOAD_VLAN_EXTEND)
173                         DPAA2_PMD_INFO("VLAN extend offload not supported");
174         }
175
176         return 0;
177 }
178
179 static int
180 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
181                       enum rte_vlan_type vlan_type __rte_unused,
182                       uint16_t tpid)
183 {
184         struct dpaa2_dev_priv *priv = dev->data->dev_private;
185         struct fsl_mc_io *dpni = priv->hw;
186         int ret = -ENOTSUP;
187
188         PMD_INIT_FUNC_TRACE();
189
190         /* nothing to be done for standard vlan tpids */
191         if (tpid == 0x8100 || tpid == 0x88A8)
192                 return 0;
193
194         ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
195                                    priv->token, tpid);
196         if (ret < 0)
197                 DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
198         /* if a TPID is already configured, remove it first and retry */
199         if (ret == -EBUSY) {
200                 struct dpni_custom_tpid_cfg tpid_list = {0};
201
202                 ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
203                                    priv->token, &tpid_list);
204                 if (ret < 0)
205                         goto fail;
206                 ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
207                                    priv->token, tpid_list.tpid1);
208                 if (ret < 0)
209                         goto fail;
210                 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
211                                            priv->token, tpid);
212         }
213 fail:
214         return ret;
215 }
216
217 static int
218 dpaa2_fw_version_get(struct rte_eth_dev *dev,
219                      char *fw_version,
220                      size_t fw_size)
221 {
222         int ret;
223         struct dpaa2_dev_priv *priv = dev->data->dev_private;
224         struct fsl_mc_io *dpni = priv->hw;
225         struct mc_soc_version mc_plat_info = {0};
226         struct mc_version mc_ver_info = {0};
227
228         PMD_INIT_FUNC_TRACE();
229
230         if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
231                 DPAA2_PMD_WARN("\tmc_get_soc_version failed");
232
233         if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
234                 DPAA2_PMD_WARN("\tmc_get_version failed");
235
236         ret = snprintf(fw_version, fw_size,
237                        "%x-%d.%d.%d",
238                        mc_plat_info.svr,
239                        mc_ver_info.major,
240                        mc_ver_info.minor,
241                        mc_ver_info.revision);
242
243         ret += 1; /* add the size of '\0' */
244         if (fw_size < (uint32_t)ret)
245                 return ret;
246         else
247                 return 0;
248 }
249
250 static int
251 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
252 {
253         struct dpaa2_dev_priv *priv = dev->data->dev_private;
254
255         PMD_INIT_FUNC_TRACE();
256
257         dev_info->if_index = priv->hw_id;
258
259         dev_info->max_mac_addrs = priv->max_mac_filters;
260         dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
261         dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
262         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
263         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
264         dev_info->rx_offload_capa = dev_rx_offloads_sup |
265                                         dev_rx_offloads_nodis;
266         dev_info->tx_offload_capa = dev_tx_offloads_sup |
267                                         dev_tx_offloads_nodis;
268         dev_info->speed_capa = ETH_LINK_SPEED_1G |
269                         ETH_LINK_SPEED_2_5G |
270                         ETH_LINK_SPEED_10G;
271
272         dev_info->max_hash_mac_addrs = 0;
273         dev_info->max_vfs = 0;
274         dev_info->max_vmdq_pools = ETH_16_POOLS;
275         dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
276
277         return 0;
278 }
279
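/* Allocate the Rx/Tx queue structures as one block, plus the per-Rx-queue
 * dq storage and the per-Tx-queue CSCN memory used for congestion
 * notification.
 */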
280 static int
281 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
282 {
283         struct dpaa2_dev_priv *priv = dev->data->dev_private;
284         uint16_t dist_idx;
285         uint32_t vq_id;
286         uint8_t num_rxqueue_per_tc;
287         struct dpaa2_queue *mc_q, *mcq;
288         uint32_t tot_queues;
289         int i;
290         struct dpaa2_queue *dpaa2_q;
291
292         PMD_INIT_FUNC_TRACE();
293
294         num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
295         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
296         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
297                           RTE_CACHE_LINE_SIZE);
298         if (!mc_q) {
299                 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
300                 return -1;
301         }
302
303         for (i = 0; i < priv->nb_rx_queues; i++) {
304                 mc_q->eth_data = dev->data;
305                 priv->rx_vq[i] = mc_q++;
306                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
307                 dpaa2_q->q_storage = rte_malloc("dq_storage",
308                                         sizeof(struct queue_storage_info_t),
309                                         RTE_CACHE_LINE_SIZE);
310                 if (!dpaa2_q->q_storage)
311                         goto fail;
312
313                 memset(dpaa2_q->q_storage, 0,
314                        sizeof(struct queue_storage_info_t));
315                 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
316                         goto fail;
317         }
318
319         for (i = 0; i < priv->nb_tx_queues; i++) {
320                 mc_q->eth_data = dev->data;
321                 mc_q->flow_id = 0xffff;
322                 priv->tx_vq[i] = mc_q++;
323                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
324                 dpaa2_q->cscn = rte_malloc(NULL,
325                                            sizeof(struct qbman_result), 16);
326                 if (!dpaa2_q->cscn)
327                         goto fail_tx;
328         }
329
330         vq_id = 0;
331         for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
332                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
333                 mcq->tc_index = dist_idx / num_rxqueue_per_tc;
334                 mcq->flow_id = dist_idx % num_rxqueue_per_tc;
335                 vq_id++;
336         }
337
338         return 0;
339 fail_tx:
340         i -= 1;
341         while (i >= 0) {
342                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
343                 rte_free(dpaa2_q->cscn);
344                 priv->tx_vq[i--] = NULL;
345         }
346         i = priv->nb_rx_queues;
347 fail:
348         i -= 1;
349         mc_q = priv->rx_vq[0];
350         while (i >= 0) {
351                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
352                 dpaa2_free_dq_storage(dpaa2_q->q_storage);
353                 rte_free(dpaa2_q->q_storage);
354                 priv->rx_vq[i--] = NULL;
355         }
356         rte_free(mc_q);
357         return -1;
358 }
359
360 static void
361 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
362 {
363         struct dpaa2_dev_priv *priv = dev->data->dev_private;
364         struct dpaa2_queue *dpaa2_q;
365         int i;
366
367         PMD_INIT_FUNC_TRACE();
368
369         /* Queues were allocated as a single block; rx_vq[0] is its base */
370         if (priv->rx_vq[0]) {
371                 /* cleaning up queue storage */
372                 for (i = 0; i < priv->nb_rx_queues; i++) {
373                         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
374                         if (dpaa2_q->q_storage)
375                                 rte_free(dpaa2_q->q_storage);
376                 }
377                 /* cleanup tx queue cscn */
378                 for (i = 0; i < priv->nb_tx_queues; i++) {
379                         dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
380                         rte_free(dpaa2_q->cscn);
381                 }
382                 /*free memory for all queues (RX+TX) */
383                 rte_free(priv->rx_vq[0]);
384                 priv->rx_vq[0] = NULL;
385         }
386 }
387
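/* Apply the ethdev configuration: max frame length (jumbo), RSS flow
 * distribution, Rx/Tx L3/L4 checksum offloads, hash FLC type on LX2 and
 * VLAN filtering; the link status is refreshed at the end.
 */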
388 static int
389 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
390 {
391         struct dpaa2_dev_priv *priv = dev->data->dev_private;
392         struct fsl_mc_io *dpni = priv->hw;
393         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
394         uint64_t rx_offloads = eth_conf->rxmode.offloads;
395         uint64_t tx_offloads = eth_conf->txmode.offloads;
396         int rx_l3_csum_offload = false;
397         int rx_l4_csum_offload = false;
398         int tx_l3_csum_offload = false;
399         int tx_l4_csum_offload = false;
400         int ret;
401
402         PMD_INIT_FUNC_TRACE();
403
404         /* Rx offloads which are enabled by default */
405         if (dev_rx_offloads_nodis & ~rx_offloads) {
406                 DPAA2_PMD_INFO(
407                 "Some of rx offloads enabled by default - requested 0x%" PRIx64
408                 " fixed are 0x%" PRIx64,
409                 rx_offloads, dev_rx_offloads_nodis);
410         }
411
412         /* Tx offloads which are enabled by default */
413         if (dev_tx_offloads_nodis & ~tx_offloads) {
414                 DPAA2_PMD_INFO(
415                 "Some of tx offloads enabled by default - requested 0x%" PRIx64
416                 " fixed are 0x%" PRIx64,
417                 tx_offloads, dev_tx_offloads_nodis);
418         }
419
420         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
421                 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
422                         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
423                                 priv->token, eth_conf->rxmode.max_rx_pkt_len);
424                         if (ret) {
425                                 DPAA2_PMD_ERR(
426                                         "Unable to set mtu. check config");
427                                 return ret;
428                         }
429                 } else {
430                         return -1;
431                 }
432         }
433
434         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
435                 ret = dpaa2_setup_flow_dist(dev,
436                                 eth_conf->rx_adv_conf.rss_conf.rss_hf);
437                 if (ret) {
438                         DPAA2_PMD_ERR("Unable to set flow distribution."
439                                       "Check queue config");
440                         return ret;
441                 }
442         }
443
444         if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
445                 rx_l3_csum_offload = true;
446
447         if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
448                 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
449                 (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
450                 rx_l4_csum_offload = true;
451
452         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
453                                DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
454         if (ret) {
455                 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
456                 return ret;
457         }
458
459         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
460                                DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
461         if (ret) {
462                 DPAA2_PMD_ERR("Error to set RX l4 csum:Error = %d", ret);
463                 return ret;
464         }
465
466         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
467                 dpaa2_enable_ts = true;
468
469         if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
470                 tx_l3_csum_offload = true;
471
472         if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
473                 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
474                 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
475                 tx_l4_csum_offload = true;
476
477         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
478                                DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
479         if (ret) {
480                 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
481                 return ret;
482         }
483
484         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
485                                DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
486         if (ret) {
487                 DPAA2_PMD_ERR("Error to set TX l4 csum:Error = %d", ret);
488                 return ret;
489         }
490
491         /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
492          * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
493          * to 0 for LS2 in the hardware thus disabling data/annotation
494          * stashing. For LX2 this is fixed in hardware and thus hash result and
495          * parse results can be received in FD using this option.
496          */
497         if (dpaa2_svr_family == SVR_LX2160A) {
498                 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
499                                        DPNI_FLCTYPE_HASH, true);
500                 if (ret) {
501                         DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
502                         return ret;
503                 }
504         }
505
506         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
507                 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
508
509         /* update the current status */
510         dpaa2_dev_link_update(dev, 0);
511
512         return 0;
513 }
514
515 /* Function to set up RX flow information. It contains traffic class ID,
516  * flow ID, destination configuration, etc.
517  */
518 static int
519 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
520                          uint16_t rx_queue_id,
521                          uint16_t nb_rx_desc,
522                          unsigned int socket_id __rte_unused,
523                          const struct rte_eth_rxconf *rx_conf __rte_unused,
524                          struct rte_mempool *mb_pool)
525 {
526         struct dpaa2_dev_priv *priv = dev->data->dev_private;
527         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
528         struct dpaa2_queue *dpaa2_q;
529         struct dpni_queue cfg;
530         uint8_t options = 0;
531         uint8_t flow_id;
532         uint32_t bpid;
533         int i, ret;
534
535         PMD_INIT_FUNC_TRACE();
536
537         DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
538                         dev, rx_queue_id, mb_pool, rx_conf);
539
540         if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
541                 bpid = mempool_to_bpid(mb_pool);
542                 ret = dpaa2_attach_bp_list(priv,
543                                            rte_dpaa2_bpid_info[bpid].bp_list);
544                 if (ret)
545                         return ret;
546         }
547         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
548         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
549         dpaa2_q->bp_array = rte_dpaa2_bpid_info;
550
551         /*Get the flow id from given VQ id*/
552         flow_id = dpaa2_q->flow_id;
553         memset(&cfg, 0, sizeof(struct dpni_queue));
554
555         options = options | DPNI_QUEUE_OPT_USER_CTX;
556         cfg.user_context = (size_t)(dpaa2_q);
557
558         /* check if a private cgr is available */
559         for (i = 0; i < priv->max_cgs; i++) {
560                 if (!priv->cgid_in_use[i]) {
561                         priv->cgid_in_use[i] = 1;
562                         break;
563                 }
564         }
565
566         if (i < priv->max_cgs) {
567                 options |= DPNI_QUEUE_OPT_SET_CGID;
568                 cfg.cgid = i;
569                 dpaa2_q->cgid = cfg.cgid;
570         } else {
571                 dpaa2_q->cgid = 0xff;
572         }
573
574         /*if ls2088 or rev2 device, enable the stashing */
575
576         if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
577                 options |= DPNI_QUEUE_OPT_FLC;
578                 cfg.flc.stash_control = true;
579                 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
580                 /* The last 6 bits control data, annotation and context
581                  * stashing, two bits each, in the order DS AS CS.
582                  * 01 01 00 (0x14) enables one line of data stashing and
583                  * one line of annotation stashing.
584                  * For LX2, this setting should be 01 00 00 (0x10).
585                  */
586                 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
587                         cfg.flc.value |= 0x10;
588                 else
589                         cfg.flc.value |= 0x14;
590         }
591         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
592                              dpaa2_q->tc_index, flow_id, options, &cfg);
593         if (ret) {
594                 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
595                 return -1;
596         }
597
598         if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
599                 struct dpni_taildrop taildrop;
600
601                 taildrop.enable = 1;
602
603                 /* A private CGR uses nb_rx_desc as the tail drop length;
604                  * all other cases use the standard byte-based tail drop.
605                  * There is no HW restriction, but the number of CGRs is
606                  * limited, hence this restriction is placed.
607                  */
608                 if (dpaa2_q->cgid != 0xff) {
609                         /*enabling per rx queue congestion control */
610                         taildrop.threshold = nb_rx_desc;
611                         taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
612                         taildrop.oal = 0;
613                         DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
614                                         rx_queue_id);
615                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
616                                                 DPNI_CP_CONGESTION_GROUP,
617                                                 DPNI_QUEUE_RX,
618                                                 dpaa2_q->tc_index,
619                                                 flow_id, &taildrop);
620                 } else {
621                         /* enabling byte-based tail drop per rx queue */
622                         taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
623                         taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
624                         taildrop.oal = CONG_RX_OAL;
625                         DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
626                                         rx_queue_id);
627                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
628                                                 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
629                                                 dpaa2_q->tc_index, flow_id,
630                                                 &taildrop);
631                 }
632                 if (ret) {
633                         DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
634                                       ret);
635                         return -1;
636                 }
637         } else { /* Disable tail Drop */
638                 struct dpni_taildrop taildrop = {0};
639                 DPAA2_PMD_INFO("Tail drop is disabled on queue");
640
641                 taildrop.enable = 0;
642                 if (dpaa2_q->cgid != 0xff) {
643                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
644                                         DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
645                                         dpaa2_q->tc_index,
646                                         flow_id, &taildrop);
647                 } else {
648                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
649                                         DPNI_CP_QUEUE, DPNI_QUEUE_RX,
650                                         dpaa2_q->tc_index, flow_id, &taildrop);
651                 }
652                 if (ret) {
653                         DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
654                                       ret);
655                         return -1;
656                 }
657         }
658
659         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
660         return 0;
661 }
662
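/* Set up a Tx flow: each Tx queue maps to its own traffic class (flow 0);
 * congestion notification is written to the queue's CSCN area unless
 * DPAA2_TX_CGR_OFF is set.
 */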
663 static int
664 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
665                          uint16_t tx_queue_id,
666                          uint16_t nb_tx_desc __rte_unused,
667                          unsigned int socket_id __rte_unused,
668                          const struct rte_eth_txconf *tx_conf __rte_unused)
669 {
670         struct dpaa2_dev_priv *priv = dev->data->dev_private;
671         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
672                 priv->tx_vq[tx_queue_id];
673         struct fsl_mc_io *dpni = priv->hw;
674         struct dpni_queue tx_conf_cfg;
675         struct dpni_queue tx_flow_cfg;
676         uint8_t options = 0, flow_id;
677         uint32_t tc_id;
678         int ret;
679
680         PMD_INIT_FUNC_TRACE();
681
682         /* Return if queue already configured */
683         if (dpaa2_q->flow_id != 0xffff) {
684                 dev->data->tx_queues[tx_queue_id] = dpaa2_q;
685                 return 0;
686         }
687
688         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
689         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
690
691         tc_id = tx_queue_id;
692         flow_id = 0;
693
694         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
695                              tc_id, flow_id, options, &tx_flow_cfg);
696         if (ret) {
697                 DPAA2_PMD_ERR("Error in setting the tx flow: "
698                               "tc_id=%d, flow=%d err=%d",
699                               tc_id, flow_id, ret);
700                 return -1;
701         }
702
703         dpaa2_q->flow_id = flow_id;
704
705         if (tx_queue_id == 0) {
706                 /*Set tx-conf and error configuration*/
707                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
708                                                     priv->token,
709                                                     DPNI_CONF_DISABLE);
710                 if (ret) {
711                         DPAA2_PMD_ERR("Error in set tx conf mode settings: "
712                                       "err=%d", ret);
713                         return -1;
714                 }
715         }
716         dpaa2_q->tc_index = tc_id;
717
718         if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
719                 struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
720
721                 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
722                 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
723                 /* Notify that the queue is not congested when the data in
724                  * the queue is below this threshold.
725                  */
726                 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
727                 cong_notif_cfg.message_ctx = 0;
728                 cong_notif_cfg.message_iova =
729                                 (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
730                 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
731                 cong_notif_cfg.notification_mode =
732                                          DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
733                                          DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
734                                          DPNI_CONG_OPT_COHERENT_WRITE;
735                 cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
736
737                 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
738                                                        priv->token,
739                                                        DPNI_QUEUE_TX,
740                                                        tc_id,
741                                                        &cong_notif_cfg);
742                 if (ret) {
743                         DPAA2_PMD_ERR(
744                            "Error in setting tx congestion notification: "
745                            "err=%d", ret);
746                         return -ret;
747                 }
748         }
749         dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
750         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
751         return 0;
752 }
753
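/* Rx queue release: clear the queue's private congestion group binding so
 * that the CGID can be reused by another queue.
 */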
754 static void
755 dpaa2_dev_rx_queue_release(void *q __rte_unused)
756 {
757         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
758         struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
759         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
760         uint8_t options = 0;
761         int ret;
762         struct dpni_queue cfg;
763
764         memset(&cfg, 0, sizeof(struct dpni_queue));
765         PMD_INIT_FUNC_TRACE();
766         if (dpaa2_q->cgid != 0xff) {
767                 options = DPNI_QUEUE_OPT_CLEAR_CGID;
768                 cfg.cgid = dpaa2_q->cgid;
769
770                 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
771                                      DPNI_QUEUE_RX,
772                                      dpaa2_q->tc_index, dpaa2_q->flow_id,
773                                      options, &cfg);
774                 if (ret)
775                         DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
776                                         dpaa2_q->fqid, ret);
777                 priv->cgid_in_use[dpaa2_q->cgid] = 0;
778                 dpaa2_q->cgid = 0xff;
779         }
780 }
781
782 static void
783 dpaa2_dev_tx_queue_release(void *q __rte_unused)
784 {
785         PMD_INIT_FUNC_TRACE();
786 }
787
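/* Return the current fill level (frame count) of an Rx queue using a QBMAN
 * frame queue state query.
 */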
788 static uint32_t
789 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
790 {
791         int32_t ret;
792         struct dpaa2_dev_priv *priv = dev->data->dev_private;
793         struct dpaa2_queue *dpaa2_q;
794         struct qbman_swp *swp;
795         struct qbman_fq_query_np_rslt state;
796         uint32_t frame_cnt = 0;
797
798         PMD_INIT_FUNC_TRACE();
799
800         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
801                 ret = dpaa2_affine_qbman_swp();
802                 if (ret) {
803                         DPAA2_PMD_ERR("Failure in affining portal");
804                         return -EINVAL;
805                 }
806         }
807         swp = DPAA2_PER_LCORE_PORTAL;
808
809         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
810
811         if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
812                 frame_cnt = qbman_fq_state_frame_count(&state);
813                 DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
814                                 rx_queue_id, frame_cnt);
815         }
816         return frame_cnt;
817 }
818
819 static const uint32_t *
820 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
821 {
822         static const uint32_t ptypes[] = {
823                 /* todo - add more types */
824                 RTE_PTYPE_L2_ETHER,
825                 RTE_PTYPE_L3_IPV4,
826                 RTE_PTYPE_L3_IPV4_EXT,
827                 RTE_PTYPE_L3_IPV6,
828                 RTE_PTYPE_L3_IPV6_EXT,
829                 RTE_PTYPE_L4_TCP,
830                 RTE_PTYPE_L4_UDP,
831                 RTE_PTYPE_L4_SCTP,
832                 RTE_PTYPE_L4_ICMP,
833                 RTE_PTYPE_UNKNOWN
834         };
835
836         if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
837                 dev->rx_pkt_burst == dpaa2_dev_rx ||
838                 dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
839                 return ptypes;
840         return NULL;
841 }
842
843 /**
844  * DPAA2 link interrupt handler
845  *
846  * @param param
847  *  The address of the parameter (struct rte_eth_dev *) registered before.
848  *
849  * @return
850  *  void
851  */
852 static void
853 dpaa2_interrupt_handler(void *param)
854 {
855         struct rte_eth_dev *dev = param;
856         struct dpaa2_dev_priv *priv = dev->data->dev_private;
857         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
858         int ret;
859         int irq_index = DPNI_IRQ_INDEX;
860         unsigned int status = 0, clear = 0;
861
862         PMD_INIT_FUNC_TRACE();
863
864         if (dpni == NULL) {
865                 DPAA2_PMD_ERR("dpni is NULL");
866                 return;
867         }
868
869         ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
870                                   irq_index, &status);
871         if (unlikely(ret)) {
872                 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
873                 clear = 0xffffffff;
874                 goto out;
875         }
876
877         if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
878                 clear = DPNI_IRQ_EVENT_LINK_CHANGED;
879                 dpaa2_dev_link_update(dev, 0);
880                 /* calling all the apps registered for link status event */
881                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
882                                               NULL);
883         }
884 out:
885         ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
886                                     irq_index, clear);
887         if (unlikely(ret))
888                 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
889 }
890
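/* Enable or disable the dpni link-change interrupt
 * (DPNI_IRQ_EVENT_LINK_CHANGED).
 */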
891 static int
892 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
893 {
894         int err = 0;
895         struct dpaa2_dev_priv *priv = dev->data->dev_private;
896         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
897         int irq_index = DPNI_IRQ_INDEX;
898         unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
899
900         PMD_INIT_FUNC_TRACE();
901
902         err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
903                                 irq_index, mask);
904         if (err < 0) {
905                 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
906                               strerror(-err));
907                 return err;
908         }
909
910         err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
911                                   irq_index, enable);
912         if (err < 0)
913                 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
914                               strerror(-err));
915
916         return err;
917 }
918
919 static int
920 dpaa2_dev_start(struct rte_eth_dev *dev)
921 {
922         struct rte_device *rdev = dev->device;
923         struct rte_dpaa2_device *dpaa2_dev;
924         struct rte_eth_dev_data *data = dev->data;
925         struct dpaa2_dev_priv *priv = data->dev_private;
926         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
927         struct dpni_queue cfg;
928         struct dpni_error_cfg   err_cfg;
929         uint16_t qdid;
930         struct dpni_queue_id qid;
931         struct dpaa2_queue *dpaa2_q;
932         int ret, i;
933         struct rte_intr_handle *intr_handle;
934
935         dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
936         intr_handle = &dpaa2_dev->intr_handle;
937
938         PMD_INIT_FUNC_TRACE();
939
940         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
941         if (ret) {
942                 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
943                               priv->hw_id, ret);
944                 return ret;
945         }
946
947         /* Power up the phy. Needed to make the link go UP */
948         dpaa2_dev_set_link_up(dev);
949
950         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
951                             DPNI_QUEUE_TX, &qdid);
952         if (ret) {
953                 DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
954                 return ret;
955         }
956         priv->qdid = qdid;
957
958         for (i = 0; i < data->nb_rx_queues; i++) {
959                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
960                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
961                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
962                                        dpaa2_q->flow_id, &cfg, &qid);
963                 if (ret) {
964                         DPAA2_PMD_ERR("Error in getting flow information: "
965                                       "err=%d", ret);
966                         return ret;
967                 }
968                 dpaa2_q->fqid = qid.fqid;
969         }
970
971         /* on checksum errors, send frames to the normal path and flag the error in the annotation */
972         err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
973         err_cfg.errors |= DPNI_ERROR_PHE;
974
975         err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
976         err_cfg.set_frame_annotation = true;
977
978         ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
979                                        priv->token, &err_cfg);
980         if (ret) {
981                 DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
982                               ret);
983                 return ret;
984         }
985
986         /* if the interrupts were configured for this device */
987         if (intr_handle && (intr_handle->fd) &&
988             (dev->data->dev_conf.intr_conf.lsc != 0)) {
989                 /* Registering LSC interrupt handler */
990                 rte_intr_callback_register(intr_handle,
991                                            dpaa2_interrupt_handler,
992                                            (void *)dev);
993
994                 /* enable vfio intr/eventfd mapping
995                  * Interrupt index 0 is required, so we can not use
996                  * rte_intr_enable.
997                  */
998                 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
999
1000                 /* enable dpni_irqs */
1001                 dpaa2_eth_setup_irqs(dev, 1);
1002         }
1003
1004         /* Change the tx burst function if ordered queues are used */
1005         if (priv->en_ordered)
1006                 dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1007
1008         return 0;
1009 }
1010
1011 /**
1012  *  This routine disables all traffic on the adapter: it brings the
1013  *  link down and disables the DPNI.
1014  */
1015 static void
1016 dpaa2_dev_stop(struct rte_eth_dev *dev)
1017 {
1018         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1019         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1020         int ret;
1021         struct rte_eth_link link;
1022         struct rte_intr_handle *intr_handle = dev->intr_handle;
1023
1024         PMD_INIT_FUNC_TRACE();
1025
1026         /* reset interrupt callback  */
1027         if (intr_handle && (intr_handle->fd) &&
1028             (dev->data->dev_conf.intr_conf.lsc != 0)) {
1029                 /*disable dpni irqs */
1030                 dpaa2_eth_setup_irqs(dev, 0);
1031
1032                 /* disable vfio intr before callback unregister */
1033                 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1034
1035                 /* Unregistering LSC interrupt handler */
1036                 rte_intr_callback_unregister(intr_handle,
1037                                              dpaa2_interrupt_handler,
1038                                              (void *)dev);
1039         }
1040
1041         dpaa2_dev_set_link_down(dev);
1042
1043         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1044         if (ret) {
1045                 DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1046                               ret, priv->hw_id);
1047                 return;
1048         }
1049
1050         /* clear the recorded link status */
1051         memset(&link, 0, sizeof(link));
1052         rte_eth_linkstatus_set(dev, &link);
1053 }
1054
1055 static void
1056 dpaa2_dev_close(struct rte_eth_dev *dev)
1057 {
1058         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1059         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1060         int ret;
1061         struct rte_eth_link link;
1062
1063         PMD_INIT_FUNC_TRACE();
1064
1065         dpaa2_flow_clean(dev);
1066
1067         /* Clean the device first */
1068         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1069         if (ret) {
1070                 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1071                 return;
1072         }
1073
1074         memset(&link, 0, sizeof(link));
1075         rte_eth_linkstatus_set(dev, &link);
1076 }
1077
1078 static int
1079 dpaa2_dev_promiscuous_enable(
1080                 struct rte_eth_dev *dev)
1081 {
1082         int ret;
1083         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1084         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1085
1086         PMD_INIT_FUNC_TRACE();
1087
1088         if (dpni == NULL) {
1089                 DPAA2_PMD_ERR("dpni is NULL");
1090                 return -ENODEV;
1091         }
1092
1093         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1094         if (ret < 0)
1095                 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1096
1097         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1098         if (ret < 0)
1099                 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1100
1101         return ret;
1102 }
1103
1104 static int
1105 dpaa2_dev_promiscuous_disable(
1106                 struct rte_eth_dev *dev)
1107 {
1108         int ret;
1109         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1110         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1111
1112         PMD_INIT_FUNC_TRACE();
1113
1114         if (dpni == NULL) {
1115                 DPAA2_PMD_ERR("dpni is NULL");
1116                 return -ENODEV;
1117         }
1118
1119         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1120         if (ret < 0)
1121                 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1122
1123         if (dev->data->all_multicast == 0) {
1124                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1125                                                  priv->token, false);
1126                 if (ret < 0)
1127                         DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1128                                       ret);
1129         }
1130
1131         return ret;
1132 }
1133
1134 static int
1135 dpaa2_dev_allmulticast_enable(
1136                 struct rte_eth_dev *dev)
1137 {
1138         int ret;
1139         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1140         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1141
1142         PMD_INIT_FUNC_TRACE();
1143
1144         if (dpni == NULL) {
1145                 DPAA2_PMD_ERR("dpni is NULL");
1146                 return -ENODEV;
1147         }
1148
1149         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1150         if (ret < 0)
1151                 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1152
1153         return ret;
1154 }
1155
1156 static int
1157 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1158 {
1159         int ret;
1160         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1161         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1162
1163         PMD_INIT_FUNC_TRACE();
1164
1165         if (dpni == NULL) {
1166                 DPAA2_PMD_ERR("dpni is NULL");
1167                 return -ENODEV;
1168         }
1169
1170         /* multicast promisc must stay enabled while promiscuous mode is on */
1171         if (dev->data->promiscuous == 1)
1172                 return 0;
1173
1174         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1175         if (ret < 0)
1176                 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1177
1178         return ret;
1179 }
1180
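/* Set the MTU: the max Rx frame length programmed in the dpni is
 * mtu + Ethernet header + CRC + one VLAN tag.
 */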
1181 static int
1182 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1183 {
1184         int ret;
1185         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1186         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1187         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1188                                 + VLAN_TAG_SIZE;
1189
1190         PMD_INIT_FUNC_TRACE();
1191
1192         if (dpni == NULL) {
1193                 DPAA2_PMD_ERR("dpni is NULL");
1194                 return -EINVAL;
1195         }
1196
1197         /* check that mtu is within the allowed range */
1198         if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1199                 return -EINVAL;
1200
1201         if (frame_size > RTE_ETHER_MAX_LEN)
1202                 dev->data->dev_conf.rxmode.offloads |=
1203                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
1204         else
1205                 dev->data->dev_conf.rxmode.offloads &=
1206                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1207
1208         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1209
1210         /* Set the Max Rx frame length as 'mtu' +
1211          * Maximum Ethernet header length
1212          */
1213         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1214                                         frame_size);
1215         if (ret) {
1216                 DPAA2_PMD_ERR("Setting the max frame length failed");
1217                 return -1;
1218         }
1219         DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1220         return 0;
1221 }
1222
1223 static int
1224 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1225                        struct rte_ether_addr *addr,
1226                        __rte_unused uint32_t index,
1227                        __rte_unused uint32_t pool)
1228 {
1229         int ret;
1230         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1231         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1232
1233         PMD_INIT_FUNC_TRACE();
1234
1235         if (dpni == NULL) {
1236                 DPAA2_PMD_ERR("dpni is NULL");
1237                 return -1;
1238         }
1239
1240         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
1241                                 priv->token, addr->addr_bytes);
1242         if (ret)
1243                 DPAA2_PMD_ERR(
1244                         "error: Adding the MAC ADDR failed: err = %d", ret);
1245         return 0;
1246 }
1247
1248 static void
1249 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1250                           uint32_t index)
1251 {
1252         int ret;
1253         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1254         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1255         struct rte_eth_dev_data *data = dev->data;
1256         struct rte_ether_addr *macaddr;
1257
1258         PMD_INIT_FUNC_TRACE();
1259
1260         macaddr = &data->mac_addrs[index];
1261
1262         if (dpni == NULL) {
1263                 DPAA2_PMD_ERR("dpni is NULL");
1264                 return;
1265         }
1266
1267         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1268                                    priv->token, macaddr->addr_bytes);
1269         if (ret)
1270                 DPAA2_PMD_ERR(
1271                         "error: Removing the MAC ADDR failed: err = %d", ret);
1272 }
1273
1274 static int
1275 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1276                        struct rte_ether_addr *addr)
1277 {
1278         int ret;
1279         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1280         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1281
1282         PMD_INIT_FUNC_TRACE();
1283
1284         if (dpni == NULL) {
1285                 DPAA2_PMD_ERR("dpni is NULL");
1286                 return -EINVAL;
1287         }
1288
1289         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1290                                         priv->token, addr->addr_bytes);
1291
1292         if (ret)
1293                 DPAA2_PMD_ERR(
1294                         "error: Setting the MAC ADDR failed %d", ret);
1295
1296         return ret;
1297 }
1298
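/* Basic statistics: device counters come from dpni pages 0-2; per-queue
 * packet counters are maintained in software, and per-queue byte counters
 * are not implemented.
 */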
1299 static
1300 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1301                          struct rte_eth_stats *stats)
1302 {
1303         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1304         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1305         int32_t  retcode;
1306         uint8_t page0 = 0, page1 = 1, page2 = 2;
1307         union dpni_statistics value;
1308         int i;
1309         struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1310
1311         memset(&value, 0, sizeof(union dpni_statistics));
1312
1313         PMD_INIT_FUNC_TRACE();
1314
1315         if (!dpni) {
1316                 DPAA2_PMD_ERR("dpni is NULL");
1317                 return -EINVAL;
1318         }
1319
1320         if (!stats) {
1321                 DPAA2_PMD_ERR("stats is NULL");
1322                 return -EINVAL;
1323         }
1324
1325         /*Get Counters from page_0*/
1326         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1327                                       page0, 0, &value);
1328         if (retcode)
1329                 goto err;
1330
1331         stats->ipackets = value.page_0.ingress_all_frames;
1332         stats->ibytes = value.page_0.ingress_all_bytes;
1333
1334         /*Get Counters from page_1*/
1335         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1336                                       page1, 0, &value);
1337         if (retcode)
1338                 goto err;
1339
1340         stats->opackets = value.page_1.egress_all_frames;
1341         stats->obytes = value.page_1.egress_all_bytes;
1342
1343         /*Get Counters from page_2*/
1344         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1345                                       page2, 0, &value);
1346         if (retcode)
1347                 goto err;
1348
1349         /* Ingress drop frame count due to configured rules */
1350         stats->ierrors = value.page_2.ingress_filtered_frames;
1351         /* Ingress drop frame count due to error */
1352         stats->ierrors += value.page_2.ingress_discarded_frames;
1353
1354         stats->oerrors = value.page_2.egress_discarded_frames;
1355         stats->imissed = value.page_2.ingress_nobuffer_discards;
1356
1357         /* Fill in per queue stats */
1358         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1359                 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1360                 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1361                 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1362                 if (dpaa2_rxq)
1363                         stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1364                 if (dpaa2_txq)
1365                         stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1366
1367                 /* Byte counting is not implemented */
1368                 stats->q_ibytes[i]   = 0;
1369                 stats->q_obytes[i]   = 0;
1370         }
1371
1372         return 0;
1373
1374 err:
1375         DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1376         return retcode;
1377 };
1378
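/* Extended statistics: pages 0-2 carry the standard dpni counters; page 4
 * carries the congestion group (CGR) reject frame/byte counters.
 */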
1379 static int
1380 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1381                      unsigned int n)
1382 {
1383         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1384         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1385         int32_t  retcode;
1386         union dpni_statistics value[5] = {};
1387         unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1388
1389         if (n < num)
1390                 return num;
1391
1392         if (xstats == NULL)
1393                 return 0;
1394
1395         /* Get Counters from page_0*/
1396         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1397                                       0, 0, &value[0]);
1398         if (retcode)
1399                 goto err;
1400
1401         /* Get Counters from page_1*/
1402         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1403                                       1, 0, &value[1]);
1404         if (retcode)
1405                 goto err;
1406
1407         /* Get Counters from page_2*/
1408         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1409                                       2, 0, &value[2]);
1410         if (retcode)
1411                 goto err;
1412
1413         for (i = 0; i < priv->max_cgs; i++) {
1414                 if (!priv->cgid_in_use[i]) {
1415                         /* Get Counters from page_4*/
1416                         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1417                                                       priv->token,
1418                                                       4, 0, &value[4]);
1419                         if (retcode)
1420                                 goto err;
1421                         break;
1422                 }
1423         }
1424
1425         for (i = 0; i < num; i++) {
1426                 xstats[i].id = i;
1427                 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1428                         raw.counter[dpaa2_xstats_strings[i].stats_id];
1429         }
1430         return i;
1431 err:
1432         DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1433         return retcode;
1434 }
1435
1436 static int
1437 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1438                        struct rte_eth_xstat_name *xstats_names,
1439                        unsigned int limit)
1440 {
1441         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1442
1443         if (limit < stat_cnt)
1444                 return stat_cnt;
1445
1446         if (xstats_names != NULL)
1447                 for (i = 0; i < stat_cnt; i++)
1448                         strlcpy(xstats_names[i].name,
1449                                 dpaa2_xstats_strings[i].name,
1450                                 sizeof(xstats_names[i].name));
1451
1452         return stat_cnt;
1453 }
1454
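/* xstats_get_by_id callback. Per the ethdev contract a NULL ids array means
 * "return the values of all extended stats"; that path is also reused below
 * to fill values_copy before resolving the individual ids.
 */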
1455 static int
1456 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1457                        uint64_t *values, unsigned int n)
1458 {
1459         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1460         uint64_t values_copy[stat_cnt];
1461
1462         if (!ids) {
1463                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1464                 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1465                 int32_t  retcode;
1466                 union dpni_statistics value[5] = {};
1467
1468                 if (n < stat_cnt)
1469                         return stat_cnt;
1470
1471                 if (!values)
1472                         return 0;
1473
1474                 /* Get Counters from page_0 */
1475                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1476                                               0, 0, &value[0]);
1477                 if (retcode)
1478                         return 0;
1479
1480                 /* Get Counters from page_1 */
1481                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1482                                               1, 0, &value[1]);
1483                 if (retcode)
1484                         return 0;
1485
1486                 /* Get Counters from page_2 */
1487                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1488                                               2, 0, &value[2]);
1489                 if (retcode)
1490                         return 0;
1491
1492                 /* Get Counters from page_4 */
1493                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1494                                               4, 0, &value[4]);
1495                 if (retcode)
1496                         return 0;
1497
1498                 for (i = 0; i < stat_cnt; i++) {
1499                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1500                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1501                 }
1502                 return stat_cnt;
1503         }
1504
1505         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1506
1507         for (i = 0; i < n; i++) {
1508                 if (ids[i] >= stat_cnt) {
1509                         DPAA2_PMD_ERR("xstats id value isn't valid");
1510                         return -1;
1511                 }
1512                 values[i] = values_copy[ids[i]];
1513         }
1514         return n;
1515 }
1516
1517 static int
1518 dpaa2_xstats_get_names_by_id(
1519         struct rte_eth_dev *dev,
1520         struct rte_eth_xstat_name *xstats_names,
1521         const uint64_t *ids,
1522         unsigned int limit)
1523 {
1524         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1525         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1526
1527         if (!ids)
1528                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1529
1530         dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1531
1532         for (i = 0; i < limit; i++) {
1533                 if (ids[i] >= stat_cnt) {
1534                         DPAA2_PMD_ERR("xstats id value isn't valid");
1535                         return -1;
1536                 }
1537                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1538         }
1539         return limit;
1540 }
1541
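/* Resets the DPNI hardware counters via dpni_reset_statistics() and clears
 * the per-queue software packet counters kept in the dpaa2_queue structures.
 * The same callback backs both stats_reset and xstats_reset in
 * dpaa2_ethdev_ops.
 */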
1542 static int
1543 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1544 {
1545         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1546         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1547         int retcode;
1548         int i;
1549         struct dpaa2_queue *dpaa2_q;
1550
1551         PMD_INIT_FUNC_TRACE();
1552
1553         if (dpni == NULL) {
1554                 DPAA2_PMD_ERR("dpni is NULL");
1555                 return -EINVAL;
1556         }
1557
1558         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1559         if (retcode)
1560                 goto error;
1561
1562         /* Reset the per queue stats in dpaa2_queue structure */
1563         for (i = 0; i < priv->nb_rx_queues; i++) {
1564                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1565                 if (dpaa2_q)
1566                         dpaa2_q->rx_pkts = 0;
1567         }
1568
1569         for (i = 0; i < priv->nb_tx_queues; i++) {
1570                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1571                 if (dpaa2_q)
1572                         dpaa2_q->tx_pkts = 0;
1573         }
1574
1575         return 0;
1576
1577 error:
1578                 DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
1579         return retcode;
1580 }
1581
1582 /* return 0 means link status changed, -1 means not changed */
1583 static int
1584 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1585                         int wait_to_complete __rte_unused)
1586 {
1587         int ret;
1588         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1589         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1590         struct rte_eth_link link;
1591         struct dpni_link_state state = {0};
1592
1593         if (dpni == NULL) {
1594                 DPAA2_PMD_ERR("dpni is NULL");
1595                 return 0;
1596         }
1597
1598         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1599         if (ret < 0) {
1600                 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1601                 return -1;
1602         }
1603
1604         memset(&link, 0, sizeof(struct rte_eth_link));
1605         link.link_status = state.up;
1606         link.link_speed = state.rate;
1607
1608         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1609                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1610         else
1611                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1612
1613         ret = rte_eth_linkstatus_set(dev, &link);
1614         if (ret == -1)
1615                 DPAA2_PMD_DEBUG("No change in status");
1616         else
1617                 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1618                                link.link_status ? "Up" : "Down");
1619
1620         return ret;
1621 }
1622
1623 /**
1624  * Enable the DPNI, if it is not already enabled.
1625  * This is not strictly PHY up/down - it is more of a logical toggle.
1626  */
1627 static int
1628 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1629 {
1630         int ret = -EINVAL;
1631         struct dpaa2_dev_priv *priv;
1632         struct fsl_mc_io *dpni;
1633         int en = 0;
1634         struct dpni_link_state state = {0};
1635
1636         priv = dev->data->dev_private;
1637         dpni = (struct fsl_mc_io *)priv->hw;
1638
1639         if (dpni == NULL) {
1640                 DPAA2_PMD_ERR("dpni is NULL");
1641                 return ret;
1642         }
1643
1644         /* Check if DPNI is currently enabled */
1645         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1646         if (ret) {
1647                 /* Unable to obtain dpni status; Not continuing */
1648                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1649                 return -EINVAL;
1650         }
1651
1652         /* Enable link if not already enabled */
1653         if (!en) {
1654                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1655                 if (ret) {
1656                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1657                         return -EINVAL;
1658                 }
1659         }
1660         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1661         if (ret < 0) {
1662                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1663                 return -1;
1664         }
1665
1666         /* Changing tx burst function to start enqueues */
1667         dev->tx_pkt_burst = dpaa2_dev_tx;
1668         dev->data->dev_link.link_status = state.up;
1669
1670         if (state.up)
1671                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1672         else
1673                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1674         return ret;
1675 }
1676
1677 /**
1678  * Disable the DPNI, if it is not already disabled.
1679  * This is not strictly PHY up/down - it is more of a logical toggle.
1680  */
1681 static int
1682 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1683 {
1684         int ret = -EINVAL;
1685         struct dpaa2_dev_priv *priv;
1686         struct fsl_mc_io *dpni;
1687         int dpni_enabled = 0;
1688         int retries = 10;
1689
1690         PMD_INIT_FUNC_TRACE();
1691
1692         priv = dev->data->dev_private;
1693         dpni = (struct fsl_mc_io *)priv->hw;
1694
1695         if (dpni == NULL) {
1696                 DPAA2_PMD_ERR("Device has not yet been configured");
1697                 return ret;
1698         }
1699
1700         /* Changing tx burst function to avoid any more enqueues */
1701         dev->tx_pkt_burst = dummy_dev_tx;
1702
1703         /* Loop while the MC drains the egress FQs after dpni_disable();
1704          * poll dpni_is_enabled() until the DPNI reports disabled.
1705          */
1706         do {
1707                 ret = dpni_disable(dpni, 0, priv->token);
1708                 if (ret) {
1709                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1710                         return ret;
1711                 }
1712                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1713                 if (ret) {
1714                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1715                         return ret;
1716                 }
1717                 if (dpni_enabled)
1718                         /* Allow the MC some slack */
1719                         rte_delay_us(100 * 1000);
1720         } while (dpni_enabled && --retries);
1721
1722         if (!retries) {
1723                 DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
1724                 /* TODO: we may have to manually clean up the queues.
1725                  */
1726         } else {
1727                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1728                                dev->data->port_id);
1729         }
1730
1731         dev->data->dev_link.link_status = 0;
1732
1733         return ret;
1734 }
1735
1736 static int
1737 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1738 {
1739         int ret = -EINVAL;
1740         struct dpaa2_dev_priv *priv;
1741         struct fsl_mc_io *dpni;
1742         struct dpni_link_state state = {0};
1743
1744         PMD_INIT_FUNC_TRACE();
1745
1746         priv = dev->data->dev_private;
1747         dpni = (struct fsl_mc_io *)priv->hw;
1748
1749         if (dpni == NULL || fc_conf == NULL) {
1750                 DPAA2_PMD_ERR("device not configured");
1751                 return ret;
1752         }
1753
1754         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1755         if (ret) {
1756                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1757                 return ret;
1758         }
1759
1760         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1761         if (state.options & DPNI_LINK_OPT_PAUSE) {
1762                 /* DPNI_LINK_OPT_PAUSE set
1763                  *  if ASYM_PAUSE not set,
1764                  *      RX Side flow control (handle received Pause frame)
1765                  *      TX side flow control (send Pause frame)
1766                  *  if ASYM_PAUSE set,
1767                  *      RX Side flow control (handle received Pause frame)
1768                  *      No TX side flow control (send Pause frame disabled)
1769                  */
1770                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1771                         fc_conf->mode = RTE_FC_FULL;
1772                 else
1773                         fc_conf->mode = RTE_FC_RX_PAUSE;
1774         } else {
1775                 /* DPNI_LINK_OPT_PAUSE not set
1776                  *  if ASYM_PAUSE set,
1777                  *      TX side flow control (send Pause frame)
1778                  *      No RX side flow control (No action on pause frame rx)
1779                  *  if ASYM_PAUSE not set,
1780                  *      Flow control disabled
1781                  */
1782                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1783                         fc_conf->mode = RTE_FC_TX_PAUSE;
1784                 else
1785                         fc_conf->mode = RTE_FC_NONE;
1786         }
1787
1788         return ret;
1789 }
1790
1791 static int
1792 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1793 {
1794         int ret = -EINVAL;
1795         struct dpaa2_dev_priv *priv;
1796         struct fsl_mc_io *dpni;
1797         struct dpni_link_state state = {0};
1798         struct dpni_link_cfg cfg = {0};
1799
1800         PMD_INIT_FUNC_TRACE();
1801
1802         priv = dev->data->dev_private;
1803         dpni = (struct fsl_mc_io *)priv->hw;
1804
1805         if (dpni == NULL) {
1806                 DPAA2_PMD_ERR("dpni is NULL");
1807                 return ret;
1808         }
1809
1810         /* It is necessary to obtain the current link state before setting
1811          * fc_conf, as the MC returns an error if the rate, autoneg or duplex
1812          * values differ from the current ones.
1813          */
1814         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1815         if (ret) {
1816                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
1817                 return -1;
1818         }
1819
1820         /* Disable link before setting configuration */
1821         dpaa2_dev_set_link_down(dev);
1822
1823         /* Based on fc_conf, update cfg */
1824         cfg.rate = state.rate;
1825         cfg.options = state.options;
1826
1827         /* update cfg with fc_conf */
1828         switch (fc_conf->mode) {
1829         case RTE_FC_FULL:
1830                 /* Full flow control;
1831                  * OPT_PAUSE set, ASYM_PAUSE not set
1832                  */
1833                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1834                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1835                 break;
1836         case RTE_FC_TX_PAUSE:
1837                 /* Enable RX flow control
1838                  * OPT_PAUSE not set;
1839                  * ASYM_PAUSE set;
1840                  */
1841                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1842                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1843                 break;
1844         case RTE_FC_RX_PAUSE:
1845                 /* Enable TX Flow control
1846                  * OPT_PAUSE set
1847                  * ASYM_PAUSE set
1848                  */
1849                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1850                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1851                 break;
1852         case RTE_FC_NONE:
1853                 /* Disable Flow control
1854                  * OPT_PAUSE not set
1855                  * ASYM_PAUSE not set
1856                  */
1857                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1858                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1859                 break;
1860         default:
1861                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
1862                               fc_conf->mode);
1863                 return -1;
1864         }
1865
1866         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1867         if (ret)
1868                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
1869                               ret);
1870
1871         /* Enable link */
1872         dpaa2_dev_set_link_up(dev);
1873
1874         return ret;
1875 }
1876
1877 static int
1878 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1879                           struct rte_eth_rss_conf *rss_conf)
1880 {
1881         struct rte_eth_dev_data *data = dev->data;
1882         struct rte_eth_conf *eth_conf = &data->dev_conf;
1883         int ret;
1884
1885         PMD_INIT_FUNC_TRACE();
1886
1887         if (rss_conf->rss_hf) {
1888                 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1889                 if (ret) {
1890                         DPAA2_PMD_ERR("Unable to set flow dist");
1891                         return ret;
1892                 }
1893         } else {
1894                 ret = dpaa2_remove_flow_dist(dev, 0);
1895                 if (ret) {
1896                         DPAA2_PMD_ERR("Unable to remove flow dist");
1897                         return ret;
1898                 }
1899         }
1900         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1901         return 0;
1902 }
1903
1904 static int
1905 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1906                             struct rte_eth_rss_conf *rss_conf)
1907 {
1908         struct rte_eth_dev_data *data = dev->data;
1909         struct rte_eth_conf *eth_conf = &data->dev_conf;
1910
1911         /* dpaa2 does not support rss_key, so length should be 0 */
1912         rss_conf->rss_key_len = 0;
1913         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1914         return 0;
1915 }
1916
1917 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
1918                 int eth_rx_queue_id,
1919                 uint16_t dpcon_id,
1920                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1921 {
1922         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1923         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1924         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1925         uint8_t flow_id = dpaa2_ethq->flow_id;
1926         struct dpni_queue cfg;
1927         uint8_t options;
1928         int ret;
1929
1930         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
1931                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
1932         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
1933                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
1934         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
1935                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
1936         else
1937                 return -EINVAL;
1938
1939         memset(&cfg, 0, sizeof(struct dpni_queue));
1940         options = DPNI_QUEUE_OPT_DEST;
1941         cfg.destination.type = DPNI_DEST_DPCON;
1942         cfg.destination.id = dpcon_id;
1943         cfg.destination.priority = queue_conf->ev.priority;
1944
1945         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
1946                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
1947                 cfg.destination.hold_active = 1;
1948         }
1949
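        /* For ordered scheduling, an order restoration point (OPR) must be
         * created on the Rx queue via dpni_set_opr() before the queue is
         * pointed at the DPCON; this is done only once per device, guarded
         * by en_ordered.
         */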
1950         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
1951                         !eth_priv->en_ordered) {
1952                 struct opr_cfg ocfg;
1953
1954                 /* Restoration window size = 256 frames */
1955                 ocfg.oprrws = 3;
1956                 /* Restoration window size = 512 frames for LX2 */
1957                 if (dpaa2_svr_family == SVR_LX2160A)
1958                         ocfg.oprrws = 4;
1959                 /* Auto advance NESN window enabled */
1960                 ocfg.oa = 1;
1961                 /* Late arrival window size disabled */
1962                 ocfg.olws = 0;
1963                 /* ORL resource exhaustion advance NESN disabled */
1964                 ocfg.oeane = 0;
1965                 /* Loose ordering enabled */
1966                 ocfg.oloe = 1;
1967                 eth_priv->en_loose_ordered = 1;
1968                 /* Strict ordering enabled if explicitly set */
1969                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
1970                         ocfg.oloe = 0;
1971                         eth_priv->en_loose_ordered = 0;
1972                 }
1973
1974                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
1975                                    dpaa2_ethq->tc_index, flow_id,
1976                                    OPR_OPT_CREATE, &ocfg);
1977                 if (ret) {
1978                         DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
1979                         return ret;
1980                 }
1981
1982                 eth_priv->en_ordered = 1;
1983         }
1984
1985         options |= DPNI_QUEUE_OPT_USER_CTX;
1986         cfg.user_context = (size_t)(dpaa2_ethq);
1987
1988         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1989                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1990         if (ret) {
1991                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1992                 return ret;
1993         }
1994
1995         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
1996
1997         return 0;
1998 }
1999
2000 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2001                 int eth_rx_queue_id)
2002 {
2003         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2004         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
2005         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2006         uint8_t flow_id = dpaa2_ethq->flow_id;
2007         struct dpni_queue cfg;
2008         uint8_t options;
2009         int ret;
2010
2011         memset(&cfg, 0, sizeof(struct dpni_queue));
2012         options = DPNI_QUEUE_OPT_DEST;
2013         cfg.destination.type = DPNI_DEST_NONE;
2014
2015         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2016                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
2017         if (ret)
2018                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2019
2020         return ret;
2021 }
2022
2023 static inline int
2024 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2025 {
2026         unsigned int i;
2027
2028         for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2029                 if (dpaa2_supported_filter_ops[i] == filter_op)
2030                         return 0;
2031         }
2032         return -ENOTSUP;
2033 }
2034
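/* Despite its name this is the .filter_ctrl callback: for the GENERIC filter
 * type it validates the requested operation against dpaa2_supported_filter_ops
 * and hands back the rte_flow ops table (dpaa2_flow_ops).
 */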
2035 static int
2036 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2037                     enum rte_filter_type filter_type,
2038                                  enum rte_filter_op filter_op,
2039                                  void *arg)
2040 {
2041         int ret = 0;
2042
2043         if (!dev)
2044                 return -ENODEV;
2045
2046         switch (filter_type) {
2047         case RTE_ETH_FILTER_GENERIC:
2048                 if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2049                         ret = -ENOTSUP;
2050                         break;
2051                 }
2052                 *(const void **)arg = &dpaa2_flow_ops;
2053                 dpaa2_filter_type |= filter_type;
2054                 break;
2055         default:
2056                 RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
2057                         filter_type);
2058                 ret = -ENOTSUP;
2059                 break;
2060         }
2061         return ret;
2062 }
2063
2064 static struct eth_dev_ops dpaa2_ethdev_ops = {
2065         .dev_configure    = dpaa2_eth_dev_configure,
2066         .dev_start            = dpaa2_dev_start,
2067         .dev_stop             = dpaa2_dev_stop,
2068         .dev_close            = dpaa2_dev_close,
2069         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2070         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2071         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2072         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2073         .dev_set_link_up      = dpaa2_dev_set_link_up,
2074         .dev_set_link_down    = dpaa2_dev_set_link_down,
2075         .link_update       = dpaa2_dev_link_update,
2076         .stats_get             = dpaa2_dev_stats_get,
2077         .xstats_get            = dpaa2_dev_xstats_get,
2078         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
2079         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2080         .xstats_get_names      = dpaa2_xstats_get_names,
2081         .stats_reset       = dpaa2_dev_stats_reset,
2082         .xstats_reset         = dpaa2_dev_stats_reset,
2083         .fw_version_get    = dpaa2_fw_version_get,
2084         .dev_infos_get     = dpaa2_dev_info_get,
2085         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2086         .mtu_set           = dpaa2_dev_mtu_set,
2087         .vlan_filter_set      = dpaa2_vlan_filter_set,
2088         .vlan_offload_set     = dpaa2_vlan_offload_set,
2089         .vlan_tpid_set        = dpaa2_vlan_tpid_set,
2090         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2091         .rx_queue_release  = dpaa2_dev_rx_queue_release,
2092         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2093         .tx_queue_release  = dpaa2_dev_tx_queue_release,
2094         .rx_queue_count       = dpaa2_dev_rx_queue_count,
2095         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
2096         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
2097         .mac_addr_add         = dpaa2_dev_add_mac_addr,
2098         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2099         .mac_addr_set         = dpaa2_dev_set_mac_addr,
2100         .rss_hash_update      = dpaa2_dev_rss_hash_update,
2101         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2102         .filter_ctrl          = dpaa2_dev_flow_ctrl,
2103 };
2104
2105 /* Populate the MAC address from the physically available (u-boot/firmware)
2106  * address and/or the one set by higher layers like MC (restool) etc.
2107  * The resolved prime MAC address is written into mac_entry.
2108  */
2109 static int
2110 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2111                   struct rte_ether_addr *mac_entry)
2112 {
2113         int ret;
2114         struct rte_ether_addr phy_mac, prime_mac;
2115
2116         memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2117         memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2118
2119         /* Get the physical device MAC address */
2120         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2121                                      phy_mac.addr_bytes);
2122         if (ret) {
2123                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2124                 goto cleanup;
2125         }
2126
2127         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2128                                         prime_mac.addr_bytes);
2129         if (ret) {
2130                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2131                 goto cleanup;
2132         }
2133
2134         /* Now that both MACs have been obtained, do the following:
2135          *  if phy_mac is non-empty and phy_mac != prime_mac, overwrite prime
2136          *     with phy and return phy;
2137          *  if phy_mac is empty, return prime;
2138          *  if both are empty, create a random MAC, set it as prime and return.
2139          */
2140         if (!rte_is_zero_ether_addr(&phy_mac)) {
2141                 /* If the addresses are not same, overwrite prime */
2142                 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2143                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2144                                                         priv->token,
2145                                                         phy_mac.addr_bytes);
2146                         if (ret) {
2147                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2148                                               ret);
2149                                 goto cleanup;
2150                         }
2151                         memcpy(&prime_mac, &phy_mac,
2152                                 sizeof(struct rte_ether_addr));
2153                 }
2154         } else if (rte_is_zero_ether_addr(&prime_mac)) {
2155                 /* If both the physical and prime MACs are zero, create a random MAC */
2156                 rte_eth_random_addr(prime_mac.addr_bytes);
2157                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2158                                                 priv->token,
2159                                                 prime_mac.addr_bytes);
2160                 if (ret) {
2161                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2162                         goto cleanup;
2163                 }
2164         }
2165
2166         /* prime_mac is the final MAC address */
2167         memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2168         return 0;
2169
2170 cleanup:
2171         return -1;
2172 }
2173
2174 static int
2175 check_devargs_handler(__rte_unused const char *key, const char *value,
2176                       __rte_unused void *opaque)
2177 {
2178         if (strcmp(value, "1"))
2179                 return -1;
2180
2181         return 0;
2182 }
2183
2184 static int
2185 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2186 {
2187         struct rte_kvargs *kvlist;
2188
2189         if (!devargs)
2190                 return 0;
2191
2192         kvlist = rte_kvargs_parse(devargs->args, NULL);
2193         if (!kvlist)
2194                 return 0;
2195
2196         if (!rte_kvargs_count(kvlist, key)) {
2197                 rte_kvargs_free(kvlist);
2198                 return 0;
2199         }
2200
2201         if (rte_kvargs_process(kvlist, key,
2202                                check_devargs_handler, NULL) < 0) {
2203                 rte_kvargs_free(kvlist);
2204                 return 0;
2205         }
2206         rte_kvargs_free(kvlist);
2207
2208         return 1;
2209 }
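
/* Devargs note (editorial, hedged): a key registered below
 * (DRIVER_LOOPBACK_MODE or DRIVER_NO_PREFETCH_MODE) only takes effect when
 * passed with the value "1", as enforced by check_devargs_handler(). On the
 * EAL command line it is supplied as part of the fslmc device argument
 * string, e.g. something of the form <bus_device>,<key>=1; the exact device
 * naming is bus-specific and shown here only as an illustration.
 */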
2210
2211 static int
2212 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2213 {
2214         struct rte_device *dev = eth_dev->device;
2215         struct rte_dpaa2_device *dpaa2_dev;
2216         struct fsl_mc_io *dpni_dev;
2217         struct dpni_attr attr;
2218         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2219         struct dpni_buffer_layout layout;
2220         int ret, hw_id, i;
2221
2222         PMD_INIT_FUNC_TRACE();
2223
2224         /* For secondary processes, the primary has done all the work */
2225         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2226                 /* In case of secondary, only burst and ops API need to be
2227                  * plugged.
2228                  */
2229                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2230                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2231                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2232                 else if (dpaa2_get_devargs(dev->devargs,
2233                                         DRIVER_NO_PREFETCH_MODE))
2234                         eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2235                 else
2236                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2237                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2238                 return 0;
2239         }
2240
2241         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2242
2243         hw_id = dpaa2_dev->object_id;
2244
2245         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2246         if (!dpni_dev) {
2247                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2248                 return -1;
2249         }
2250
2251         dpni_dev->regs = rte_mcp_ptr_list[0];
2252         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2253         if (ret) {
2254                 DPAA2_PMD_ERR(
2255                              "Failure in opening dpni@%d with err code %d",
2256                              hw_id, ret);
2257                 rte_free(dpni_dev);
2258                 return -1;
2259         }
2260
2261         /* Clean the device first */
2262         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2263         if (ret) {
2264                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2265                               hw_id, ret);
2266                 goto init_err;
2267         }
2268
2269         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2270         if (ret) {
2271                 DPAA2_PMD_ERR(
2272                              "Failure in get dpni@%d attribute, err code %d",
2273                              hw_id, ret);
2274                 goto init_err;
2275         }
2276
2277         priv->num_rx_tc = attr.num_rx_tcs;
2278         /* Congestion groups are available only if the custom CG option is set */
2279         if (attr.options & DPNI_OPT_CUSTOM_CG)
2280                 priv->max_cgs = attr.num_cgs;
2281         else
2282                 priv->max_cgs = 0;
2283
2284         for (i = 0; i < priv->max_cgs; i++)
2285                 priv->cgid_in_use[i] = 0;
2286
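        /* Total Rx queues = number of Rx traffic classes x queues per TC */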
2287         for (i = 0; i < attr.num_rx_tcs; i++)
2288                 priv->nb_rx_queues += attr.num_queues;
2289
2290         /* Use the number of Tx TCs as the number of Tx queues */
2291         priv->nb_tx_queues = attr.num_tx_tcs;
2292
2293         DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2294                         priv->num_rx_tc, priv->nb_rx_queues,
2295                         priv->nb_tx_queues, priv->max_cgs);
2296
2297         priv->hw = dpni_dev;
2298         priv->hw_id = hw_id;
2299         priv->options = attr.options;
2300         priv->max_mac_filters = attr.mac_filter_entries;
2301         priv->max_vlan_filters = attr.vlan_filter_entries;
2302         priv->flags = 0;
2303
2304         /* Allocate memory for hardware structure for queues */
2305         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2306         if (ret) {
2307                 DPAA2_PMD_ERR("Queue allocation Failed");
2308                 goto init_err;
2309         }
2310
2311         /* Allocate memory for storing MAC addresses.
2312          * Table of mac_filter_entries size is allocated so that RTE ether lib
2313          * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2314          */
2315         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2316                 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2317         if (eth_dev->data->mac_addrs == NULL) {
2318                 DPAA2_PMD_ERR(
2319                    "Failed to allocate %d bytes needed to store MAC addresses",
2320                    RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2321                 ret = -ENOMEM;
2322                 goto init_err;
2323         }
2324
2325         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2326         if (ret) {
2327                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2328                 rte_free(eth_dev->data->mac_addrs);
2329                 eth_dev->data->mac_addrs = NULL;
2330                 goto init_err;
2331         }
2332
2333         /* ... tx buffer layout ... */
2334         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2335         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2336         layout.pass_frame_status = 1;
2337         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2338                                      DPNI_QUEUE_TX, &layout);
2339         if (ret) {
2340                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2341                 goto init_err;
2342         }
2343
2344         /* ... tx-conf and error buffer layout ... */
2345         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2346         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2347         layout.pass_frame_status = 1;
2348         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2349                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2350         if (ret) {
2351                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2352                              ret);
2353                 goto init_err;
2354         }
2355
2356         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2357
2358         if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2359                 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2360                 DPAA2_PMD_INFO("Loopback mode");
2361         } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2362                 eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2363                 DPAA2_PMD_INFO("No Prefetch mode");
2364         } else {
2365                 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2366         }
2367         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2368
2369         /* Init fields w.r.t. classification */
2370         memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
2371         priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2372         if (!priv->extract.qos_extract_param) {
2373                 DPAA2_PMD_ERR("Error in allocating resources for flow classification");
2374                 ret = -ENOMEM;
2375                 goto init_err;
2376         }
2377         for (i = 0; i < MAX_TCS; i++) {
2378                 memset(&priv->extract.fs_key_cfg[i], 0,
2379                         sizeof(struct dpkg_profile_cfg));
2380                 priv->extract.fs_extract_param[i] =
2381                         (size_t)rte_malloc(NULL, 256, 64);
2382                 if (!priv->extract.fs_extract_param[i]) {
2383                         DPAA2_PMD_ERR("Error in allocating resources for flow classification");
2384                         ret = -ENOMEM;
2385                         goto init_err;
2386                 }
2387         }
2388
2389         RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2390         return 0;
2391 init_err:
2392         dpaa2_dev_uninit(eth_dev);
2393         return ret;
2394 }
2395
2396 static int
2397 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2398 {
2399         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2400         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2401         int i, ret;
2402
2403         PMD_INIT_FUNC_TRACE();
2404
2405         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2406                 return 0;
2407
2408         if (!dpni) {
2409                 DPAA2_PMD_WARN("Already closed or not started");
2410                 return -1;
2411         }
2412
2413         dpaa2_dev_close(eth_dev);
2414
2415         dpaa2_free_rx_tx_queues(eth_dev);
2416
2417         /* Close the device at the underlying layer */
2418         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2419         if (ret) {
2420                 DPAA2_PMD_ERR(
2421                              "Failure closing dpni device with err code %d",
2422                              ret);
2423         }
2424
2425         /* Free the allocated memory for Ethernet private data and dpni */
2426         priv->hw = NULL;
2427         rte_free(dpni);
2428
2429         for (i = 0; i < MAX_TCS; i++) {
2430                 if (priv->extract.fs_extract_param[i])
2431                         rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
2432         }
2433
2434         if (priv->extract.qos_extract_param)
2435                 rte_free((void *)(size_t)priv->extract.qos_extract_param);
2436
2437         eth_dev->dev_ops = NULL;
2438         eth_dev->rx_pkt_burst = NULL;
2439         eth_dev->tx_pkt_burst = NULL;
2440
2441         DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2442         return 0;
2443 }
2444
2445 static int
2446 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2447                 struct rte_dpaa2_device *dpaa2_dev)
2448 {
2449         struct rte_eth_dev *eth_dev;
2450         int diag;
2451
2452         if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2453                 RTE_PKTMBUF_HEADROOM) {
2454                 DPAA2_PMD_ERR(
2455                 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2456                 RTE_PKTMBUF_HEADROOM,
2457                 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2458
2459                 return -1;
2460         }
2461
2462         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2463                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2464                 if (!eth_dev)
2465                         return -ENODEV;
2466                 eth_dev->data->dev_private = rte_zmalloc(
2467                                                 "ethdev private structure",
2468                                                 sizeof(struct dpaa2_dev_priv),
2469                                                 RTE_CACHE_LINE_SIZE);
2470                 if (eth_dev->data->dev_private == NULL) {
2471                         DPAA2_PMD_CRIT(
2472                                 "Unable to allocate memory for private data");
2473                         rte_eth_dev_release_port(eth_dev);
2474                         return -ENOMEM;
2475                 }
2476         } else {
2477                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2478                 if (!eth_dev)
2479                         return -ENODEV;
2480         }
2481
2482         eth_dev->device = &dpaa2_dev->device;
2483
2484         dpaa2_dev->eth_dev = eth_dev;
2485         eth_dev->data->rx_mbuf_alloc_failed = 0;
2486
2487         if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2488                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2489
2490         /* Invoke PMD device initialization function */
2491         diag = dpaa2_dev_init(eth_dev);
2492         if (diag == 0) {
2493                 rte_eth_dev_probing_finish(eth_dev);
2494                 return 0;
2495         }
2496
2497         rte_eth_dev_release_port(eth_dev);
2498         return diag;
2499 }
2500
2501 static int
2502 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2503 {
2504         struct rte_eth_dev *eth_dev;
2505
2506         eth_dev = dpaa2_dev->eth_dev;
2507         dpaa2_dev_uninit(eth_dev);
2508
2509         rte_eth_dev_release_port(eth_dev);
2510
2511         return 0;
2512 }
2513
2514 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2515         .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2516         .drv_type = DPAA2_ETH,
2517         .probe = rte_dpaa2_probe,
2518         .remove = rte_dpaa2_remove,
2519 };
2520
2521 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2522 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2523                 DRIVER_LOOPBACK_MODE "=<int> "
2524                 DRIVER_NO_PREFETCH_MODE "=<int>");
2525 RTE_INIT(dpaa2_pmd_init_log)
2526 {
2527         dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
2528         if (dpaa2_logtype_pmd >= 0)
2529                 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
2530 }