net/dpaa2: support taildrop on frame count basis
drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
        DEV_RX_OFFLOAD_CHECKSUM |
        DEV_RX_OFFLOAD_SCTP_CKSUM |
        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
        DEV_RX_OFFLOAD_VLAN_STRIP |
        DEV_RX_OFFLOAD_VLAN_FILTER |
        DEV_RX_OFFLOAD_JUMBO_FRAME |
        DEV_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
        DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
        DEV_TX_OFFLOAD_VLAN_INSERT |
        DEV_TX_OFFLOAD_IPV4_CKSUM |
        DEV_TX_OFFLOAD_UDP_CKSUM |
        DEV_TX_OFFLOAD_TCP_CKSUM |
        DEV_TX_OFFLOAD_SCTP_CKSUM |
        DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_TX_OFFLOAD_MT_LOCKFREE |
        DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
        DEV_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
enum pmd_dpaa2_ts dpaa2_enable_ts;

struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
        uint8_t stats_id; /* stats id in the given page */
};

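/* Each xstat below is addressed by (page_id, stats_id):
 * dpni_get_statistics() fetches one page of DPNI counters at a time,
 * and stats_id indexes the raw counter array within that page.
 */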
static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
        {"ingress_multicast_frames", 0, 2},
        {"ingress_multicast_bytes", 0, 3},
        {"ingress_broadcast_frames", 0, 4},
        {"ingress_broadcast_bytes", 0, 5},
        {"egress_multicast_frames", 1, 2},
        {"egress_multicast_bytes", 1, 3},
        {"egress_broadcast_frames", 1, 4},
        {"egress_broadcast_bytes", 1, 5},
        {"ingress_filtered_frames", 2, 0},
        {"ingress_discarded_frames", 2, 1},
        {"ingress_nobuffer_discards", 2, 2},
        {"egress_discarded_frames", 2, 3},
        {"egress_confirmed_frames", 2, 4},
};

static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
        RTE_ETH_FILTER_ADD,
        RTE_ETH_FILTER_DELETE,
        RTE_ETH_FILTER_UPDATE,
        RTE_ETH_FILTER_FLUSH,
        RTE_ETH_FILTER_GET
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
        dpaa2_enable_ts = enable;
}

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -1;
        }

        if (on)
                ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
                                       priv->token, vlan_id);
        else
                ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
                                          priv->token, vlan_id);

        if (ret < 0)
                DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
                              ret, vlan_id, priv->hw_id);

        return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filter not available */
                if (!priv->max_vlan_filters) {
                        DPAA2_PMD_INFO("VLAN filter not available");
                        goto next_mask;
                }

                if (dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, false);
                if (ret < 0)
                        DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
        }
next_mask:
        if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_VLAN_EXTEND)
                        DPAA2_PMD_INFO("VLAN extend offload not supported");
        }

        return 0;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
                    enum rte_vlan_type vlan_type __rte_unused,
                    uint16_t tpid)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        int ret = -ENOTSUP;

        PMD_INIT_FUNC_TRACE();

        /* nothing to be done for standard vlan tpids */
        if (tpid == 0x8100 || tpid == 0x88A8)
                return 0;

        ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
                                   priv->token, tpid);
        if (ret < 0)
                DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
        /* if a custom TPID is already configured, remove it first and retry */
        if (ret == -EBUSY) {
                struct dpni_custom_tpid_cfg tpid_list = {0};

                ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
                                           priv->token, &tpid_list);
                if (ret < 0)
                        goto fail;
                ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
                                              priv->token, tpid_list.tpid1);
                if (ret < 0)
                        goto fail;
                ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
                                           priv->token, tpid);
        }
fail:
        return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
                     char *fw_version,
                     size_t fw_size)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct mc_soc_version mc_plat_info = {0};
        struct mc_version mc_ver_info = {0};

        PMD_INIT_FUNC_TRACE();

        if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
                DPAA2_PMD_WARN("\tmc_get_soc_version failed");

        if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
                DPAA2_PMD_WARN("\tmc_get_version failed");

        ret = snprintf(fw_version, fw_size,
                       "%x-%d.%d.%d",
                       mc_plat_info.svr,
                       mc_ver_info.major,
                       mc_ver_info.minor,
                       mc_ver_info.revision);

        ret += 1; /* add the size of '\0' */
        if (fw_size < (uint32_t)ret)
                return ret;
        else
                return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        dev_info->if_index = priv->hw_id;

        dev_info->max_mac_addrs = priv->max_mac_filters;
        dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
        dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
        dev_info->rx_offload_capa = dev_rx_offloads_sup |
                                        dev_rx_offloads_nodis;
        dev_info->tx_offload_capa = dev_tx_offloads_sup |
                                        dev_tx_offloads_nodis;
        dev_info->speed_capa = ETH_LINK_SPEED_1G |
                        ETH_LINK_SPEED_2_5G |
                        ETH_LINK_SPEED_10G;

        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
        dev_info->max_vmdq_pools = ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

        return 0;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        uint16_t dist_idx;
        uint32_t vq_id;
        uint8_t num_rxqueue_per_tc;
        struct dpaa2_queue *mc_q, *mcq;
        uint32_t tot_queues;
        int i;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

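        /* Rx and Tx queue structures are carved out of one contiguous
         * allocation, so priv->rx_vq[0] points at the base of the block
         * and is the only pointer that is rte_free()'d on teardown
         * (see dpaa2_free_rx_tx_queues()).
         */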
        num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
        tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
        mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
                          RTE_CACHE_LINE_SIZE);
        if (!mc_q) {
                DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
                return -1;
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                mc_q->eth_data = dev->data;
                priv->rx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_q->q_storage = rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                if (!dpaa2_q->q_storage)
                        goto fail;

                memset(dpaa2_q->q_storage, 0,
                       sizeof(struct queue_storage_info_t));
                if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
                        goto fail;
        }

        for (i = 0; i < priv->nb_tx_queues; i++) {
                mc_q->eth_data = dev->data;
                mc_q->flow_id = 0xffff;
                priv->tx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                dpaa2_q->cscn = rte_malloc(NULL,
                                           sizeof(struct qbman_result), 16);
                if (!dpaa2_q->cscn)
                        goto fail_tx;
        }

        vq_id = 0;
        for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
                mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
                mcq->tc_index = dist_idx / num_rxqueue_per_tc;
                mcq->flow_id = dist_idx % num_rxqueue_per_tc;
                vq_id++;
        }

        return 0;
fail_tx:
        i -= 1;
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                rte_free(dpaa2_q->cscn);
                priv->tx_vq[i--] = NULL;
        }
        i = priv->nb_rx_queues;
fail:
        i -= 1;
        mc_q = priv->rx_vq[0];
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_free_dq_storage(dpaa2_q->q_storage);
                rte_free(dpaa2_q->q_storage);
                priv->rx_vq[i--] = NULL;
        }
        rte_free(mc_q);
        return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        int i;

        PMD_INIT_FUNC_TRACE();

        /* Queue allocation base */
        if (priv->rx_vq[0]) {
                /* cleaning up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                        if (dpaa2_q->q_storage)
                                rte_free(dpaa2_q->q_storage);
                }
                /* cleanup tx queue cscn */
                for (i = 0; i < priv->nb_tx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                        rte_free(dpaa2_q->cscn);
                }
                /* free memory for all queues (Rx + Tx) */
                rte_free(priv->rx_vq[0]);
                priv->rx_vq[0] = NULL;
        }
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint64_t tx_offloads = eth_conf->txmode.offloads;
        int rx_l3_csum_offload = false;
        int rx_l4_csum_offload = false;
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
                DPAA2_PMD_INFO(
                "Some Rx offloads enabled by default - requested 0x%" PRIx64
                " fixed are 0x%" PRIx64,
                rx_offloads, dev_rx_offloads_nodis);
        }

        /* Tx offloads which are enabled by default */
        if (dev_tx_offloads_nodis & ~tx_offloads) {
                DPAA2_PMD_INFO(
                "Some Tx offloads enabled by default - requested 0x%" PRIx64
                " fixed are 0x%" PRIx64,
                tx_offloads, dev_tx_offloads_nodis);
        }

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
                        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
                                priv->token, eth_conf->rxmode.max_rx_pkt_len);
                        if (ret) {
                                DPAA2_PMD_ERR(
                                        "Unable to set MTU. Check config");
                                return ret;
                        }
                } else {
                        return -1;
                }
        }

        if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
                ret = dpaa2_setup_flow_dist(dev,
                                eth_conf->rx_adv_conf.rss_conf.rss_hf);
                if (ret) {
                        DPAA2_PMD_ERR("Unable to set flow distribution. "
                                      "Check queue config");
                        return ret;
                }
        }

        if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
                rx_l3_csum_offload = true;

        if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
                (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
                (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
                rx_l4_csum_offload = true;

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting Rx L3 csum: err = %d", ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting Rx L4 csum: err = %d", ret);
                return ret;
        }

        if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
                dpaa2_enable_ts = true;

        if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;

        if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
                (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
                (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
                tx_l4_csum_offload = true;

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting Tx L3 csum: err = %d", ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting Tx L4 csum: err = %d", ret);
                return ret;
        }

        /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
         * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
         * to 0 for LS2 in the hardware thus disabling data/annotation
         * stashing. For LX2 this is fixed in hardware and thus hash result and
         * parse results can be received in FD using this option.
         */
        if (dpaa2_svr_family == SVR_LX2160A) {
                ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                                       DPNI_FLCTYPE_HASH, true);
                if (ret) {
                        DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
                        return ret;
                }
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

        /* update the current status */
        dpaa2_dev_link_update(dev, 0);

        return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_rxconf *rx_conf __rte_unused,
                         struct rte_mempool *mb_pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;
        uint8_t options = 0;
        uint8_t flow_id;
        uint32_t bpid;
        int i, ret;

        PMD_INIT_FUNC_TRACE();

        DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
                        dev, rx_queue_id, mb_pool, rx_conf);

        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv,
                                           rte_dpaa2_bpid_info[bpid].bp_list);
                if (ret)
                        return ret;
        }
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
        dpaa2_q->bp_array = rte_dpaa2_bpid_info;

        /* Get the flow id from the given VQ id */
        flow_id = dpaa2_q->flow_id;
        memset(&cfg, 0, sizeof(struct dpni_queue));

        options = options | DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (size_t)(dpaa2_q);

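        /* Try to claim a private congestion group (CGID) for this queue.
         * With a private CGR, tail drop can be accounted in frame units
         * against the congestion group; queues that do not get a CGR
         * (cgid == 0xff) fall back to byte-based tail drop on the frame
         * queue itself further below.
         */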
        /* search for an unused CGID */
        for (i = 0; i < priv->max_cgs; i++) {
                if (!priv->cgid_in_use[i]) {
                        priv->cgid_in_use[i] = 1;
                        break;
                }
        }

        if (i < priv->max_cgs) {
                options |= DPNI_QUEUE_OPT_SET_CGID;
                cfg.cgid = i;
                dpaa2_q->cgid = cfg.cgid;
        } else {
                dpaa2_q->cgid = 0xff;
        }

        /* Enable flow stashing on LS2088A and newer (rev2) devices */
        if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* The low 6 bits of the FLC select stashing, two bits each
                 * in the order DS AS CS (data, annotation, context):
                 * 01 01 00 (0x14) enables 1 cache line of data and
                 * 1 line of annotation stashing.
                 * For LX2 this setting should be 01 00 00 (0x10).
                 */
                if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
                        cfg.flc.value |= 0x10;
                else
                        cfg.flc.value |= 0x14;
        }
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting the rx flow: err = %d", ret);
                return -1;
        }

        if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
                struct dpni_taildrop taildrop;

                taildrop.enable = 1;

                /* A private CGR uses nb_rx_desc as the tail drop threshold;
                 * the remaining queues use the standard byte-based tail drop.
                 * There is no HW restriction, but the number of CGRs is
                 * limited, hence this policy.
                 */
                if (dpaa2_q->cgid != 0xff) {
                        /* enable congestion-group tail drop, in frame units */
                        taildrop.threshold = nb_rx_desc;
                        taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
                        taildrop.oal = 0;
                        DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
                                        rx_queue_id);
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                                DPNI_CP_CONGESTION_GROUP,
                                                DPNI_QUEUE_RX,
                                                dpaa2_q->tc_index,
                                                flow_id, &taildrop);
                } else {
                        /* enable per-queue tail drop, in byte units */
                        taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
                        taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
                        taildrop.oal = CONG_RX_OAL;
                        DPAA2_PMD_DEBUG("Enabling byte based tail drop on queue = %d",
                                        rx_queue_id);
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                                DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                                dpaa2_q->tc_index, flow_id,
                                                &taildrop);
                }
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
                                      ret);
                        return -1;
                }
        } else { /* Disable tail drop */
                struct dpni_taildrop taildrop = {0};

                DPAA2_PMD_INFO("Tail drop is disabled on queue");

                taildrop.enable = 0;
                if (dpaa2_q->cgid != 0xff) {
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index,
                                        flow_id, &taildrop);
                } else {
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index, flow_id, &taildrop);
                }
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
                                      ret);
                        return -1;
                }
        }

        dev->data->rx_queues[rx_queue_id] = dpaa2_q;
        return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t tx_queue_id,
                         uint16_t nb_tx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
                priv->tx_vq[tx_queue_id];
        struct fsl_mc_io *dpni = priv->hw;
        struct dpni_queue tx_conf_cfg;
        struct dpni_queue tx_flow_cfg;
        uint8_t options = 0, flow_id;
        uint32_t tc_id;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Return if queue already configured */
        if (dpaa2_q->flow_id != 0xffff) {
                dev->data->tx_queues[tx_queue_id] = dpaa2_q;
                return 0;
        }

        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

        tc_id = tx_queue_id;
        flow_id = 0;

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
                             tc_id, flow_id, options, &tx_flow_cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting the tx flow: "
                              "tc_id=%d, flow=%d err=%d",
                              tc_id, flow_id, ret);
                return -1;
        }

        dpaa2_q->flow_id = flow_id;

        if (tx_queue_id == 0) {
                /* Set tx-conf and error configuration */
                ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                                                    priv->token,
                                                    DPNI_CONF_DISABLE);
                if (ret) {
                        DPAA2_PMD_ERR("Error in set tx conf mode settings: "
                                      "err=%d", ret);
                        return -1;
                }
        }
        dpaa2_q->tc_index = tc_id;

        if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
                struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

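                /* The hardware writes a congestion state change notification
                 * (CSCN) into dpaa2_q->cscn when the queue crosses the entry
                 * or exit threshold (counted in frames); the Tx path can poll
                 * that memory to detect congestion instead of taking an
                 * interrupt (dest_type is DPNI_DEST_NONE).
                 */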
                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
                cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
                /* Notify that the queue is not congested when the data in
                 * the queue is below this threshold.
                 */
                cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
                cong_notif_cfg.message_ctx = 0;
                cong_notif_cfg.message_iova =
                                (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
                cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
                cong_notif_cfg.notification_mode =
                                         DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                                         DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
                                         DPNI_CONG_OPT_COHERENT_WRITE;
                cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

                ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
                                                       priv->token,
                                                       DPNI_QUEUE_TX,
                                                       tc_id,
                                                       &cong_notif_cfg);
                if (ret) {
                        DPAA2_PMD_ERR(
                           "Error in setting tx congestion notification: "
                           "err=%d", ret);
                        return -ret;
                }
        }
        dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
        dev->data->tx_queues[tx_queue_id] = dpaa2_q;
        return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q)
{
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
        struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        uint8_t options = 0;
        int ret;
        struct dpni_queue cfg;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        PMD_INIT_FUNC_TRACE();
        if (dpaa2_q->cgid != 0xff) {
                options = DPNI_QUEUE_OPT_CLEAR_CGID;
                cfg.cgid = dpaa2_q->cgid;

                ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX,
                                     dpaa2_q->tc_index, dpaa2_q->flow_id,
                                     options, &cfg);
                if (ret)
                        DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
                                        dpaa2_q->fqid, ret);
                priv->cgid_in_use[dpaa2_q->cgid] = 0;
                dpaa2_q->cgid = 0xff;
        }
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        int32_t ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        struct qbman_swp *swp;
        struct qbman_fq_query_np_rslt state;
        uint32_t frame_cnt = 0;

        PMD_INIT_FUNC_TRACE();

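        /* QBMan software portals are per-lcore; affine one lazily if this
         * thread has not been assigned a portal yet.
         */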
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_PMD_ERR("Failure in affining portal");
                        return -EINVAL;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
                DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
                                rx_queue_id, frame_cnt);
        }
        return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* TODO: add more types */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
                dev->rx_pkt_burst == dpaa2_dev_rx ||
                dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
                return ptypes;
        return NULL;
}

/**
 * Dpaa2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = param;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int ret;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int status = 0, clear = 0;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return;
        }

        ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, &status);
        if (unlikely(ret)) {
                DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
                clear = 0xffffffff;
                goto out;
        }

        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                clear = DPNI_IRQ_EVENT_LINK_CHANGED;
                dpaa2_dev_link_update(dev, 0);
                /* calling all the apps registered for link status event */
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
        }
out:
        ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                    irq_index, clear);
        if (unlikely(ret))
                DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
        int err = 0;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

        PMD_INIT_FUNC_TRACE();

        err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
                                irq_index, mask);
        if (err < 0) {
                DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
                              strerror(-err));
                return err;
        }

        err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, enable);
        if (err < 0)
                DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
                              strerror(-err));

        return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
        struct rte_device *rdev = dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct rte_eth_dev_data *data = dev->data;
        struct dpaa2_dev_priv *priv = data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpni_queue cfg;
        struct dpni_error_cfg err_cfg;
        uint16_t qdid;
        struct dpni_queue_id qid;
        struct dpaa2_queue *dpaa2_q;
        int ret, i;
        struct rte_intr_handle *intr_handle;

        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
        intr_handle = &dpaa2_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
                              priv->hw_id, ret);
                return ret;
        }

        /* Power up the phy. Needed to make the link go UP */
        dpaa2_dev_set_link_up(dev);

        ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
                            DPNI_QUEUE_TX, &qdid);
        if (ret) {
                DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
                return ret;
        }
        priv->qdid = qdid;

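        /* Resolve the hardware frame queue id (FQID) of every Rx queue now
         * that the queues are fully configured; the Rx burst functions pull
         * frames using these FQIDs.
         */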
        for (i = 0; i < data->nb_rx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX, dpaa2_q->tc_index,
                                     dpaa2_q->flow_id, &cfg, &qid);
                if (ret) {
                        DPAA2_PMD_ERR("Error in getting flow information: "
                                      "err=%d", ret);
                        return ret;
                }
                dpaa2_q->fqid = qid.fqid;
        }

        /* On checksum errors, send frames on the normal path and record the
         * error in the frame annotation.
         */
        err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
        err_cfg.errors |= DPNI_ERROR_PHE;

        err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
        err_cfg.set_frame_annotation = true;

        ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
                                       priv->token, &err_cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
                              ret);
                return ret;
        }

        /* if the interrupts were configured on this device */
        if (intr_handle && (intr_handle->fd) &&
            (dev->data->dev_conf.intr_conf.lsc != 0)) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           dpaa2_interrupt_handler,
                                           (void *)dev);

                /* enable vfio intr/eventfd mapping
                 * Interrupt index 0 is required, so we can not use
                 * rte_intr_enable.
                 */
                rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

                /* enable dpni_irqs */
                dpaa2_eth_setup_irqs(dev, 1);
        }

        /* Change the tx burst function if ordered queues are used */
        if (priv->en_ordered)
                dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

        return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int ret;
        struct rte_eth_link link;
        struct rte_intr_handle *intr_handle = dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        /* reset interrupt callback */
        if (intr_handle && (intr_handle->fd) &&
            (dev->data->dev_conf.intr_conf.lsc != 0)) {
                /* disable dpni irqs */
                dpaa2_eth_setup_irqs(dev, 0);

                /* disable vfio intr before callback unregister */
                rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

                /* Unregistering LSC interrupt handler */
                rte_intr_callback_unregister(intr_handle,
                                             dpaa2_interrupt_handler,
                                             (void *)dev);
        }

        dpaa2_dev_set_link_down(dev);

        ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
                              ret, priv->hw_id);
                return;
        }

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int ret;
        struct rte_eth_link link;

        PMD_INIT_FUNC_TRACE();

        dpaa2_flow_clean(dev);

        /* Clean the device first */
        ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
                return;
        }

        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);
}

static int
dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -ENODEV;
        }

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);

        return ret;
}

static int
dpaa2_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -ENODEV;
        }

        ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
        if (ret < 0)
                DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

        if (dev->data->all_multicast == 0) {
                ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
                                                 priv->token, false);
                if (ret < 0)
                        DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
                                      ret);
        }

        return ret;
}

static int
dpaa2_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -ENODEV;
        }

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
        if (ret < 0)
                DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);

        return ret;
}

static int
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -ENODEV;
        }

        /* must remain on for all promiscuous */
        if (dev->data->promiscuous == 1)
                return 0;

        ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
        if (ret < 0)
                DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);

        return ret;
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
                                + VLAN_TAG_SIZE;
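        /* The hardware is programmed with a full frame length, not an MTU:
         * account for the Ethernet header, CRC and one VLAN tag on top of
         * the requested MTU.
         */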

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -EINVAL;
        }

        /* check that mtu is within the allowed range */
        if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
                return -EINVAL;

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
                                        frame_size);
        if (ret) {
                DPAA2_PMD_ERR("Setting the max frame length failed");
                return -1;
        }
        DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
        return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
                       struct rte_ether_addr *addr,
                       __rte_unused uint32_t index,
                       __rte_unused uint32_t pool)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -1;
        }

        ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
                                priv->token, addr->addr_bytes);
        if (ret)
                DPAA2_PMD_ERR(
                        "error: Adding the MAC ADDR failed: err = %d", ret);
        return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
                          uint32_t index)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct rte_eth_dev_data *data = dev->data;
        struct rte_ether_addr *macaddr;

        PMD_INIT_FUNC_TRACE();

        macaddr = &data->mac_addrs[index];

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return;
        }

        ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
                                   priv->token, macaddr->addr_bytes);
        if (ret)
                DPAA2_PMD_ERR(
                        "error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
                       struct rte_ether_addr *addr)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -EINVAL;
        }

        ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
                                        priv->token, addr->addr_bytes);

        if (ret)
                DPAA2_PMD_ERR(
                        "error: Setting the MAC ADDR failed %d", ret);

        return ret;
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int32_t retcode;
        uint8_t page0 = 0, page1 = 1, page2 = 2;
        union dpni_statistics value;
        int i;
        struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

        memset(&value, 0, sizeof(union dpni_statistics));

        PMD_INIT_FUNC_TRACE();

        if (!dpni) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -EINVAL;
        }

        if (!stats) {
                DPAA2_PMD_ERR("stats is NULL");
                return -EINVAL;
        }

        /* Get counters from page_0 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page0, 0, &value);
        if (retcode)
                goto err;

        stats->ipackets = value.page_0.ingress_all_frames;
        stats->ibytes = value.page_0.ingress_all_bytes;

        /* Get counters from page_1 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page1, 0, &value);
        if (retcode)
                goto err;

        stats->opackets = value.page_1.egress_all_frames;
        stats->obytes = value.page_1.egress_all_bytes;

        /* Get counters from page_2 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      page2, 0, &value);
        if (retcode)
                goto err;

        /* Ingress drop frame count due to configured rules */
        stats->ierrors = value.page_2.ingress_filtered_frames;
        /* Ingress drop frame count due to error */
        stats->ierrors += value.page_2.ingress_discarded_frames;

        stats->oerrors = value.page_2.egress_discarded_frames;
        stats->imissed = value.page_2.ingress_nobuffer_discards;

        /* Fill in per queue stats */
        for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
                (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
                dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
                if (dpaa2_rxq)
                        stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
                if (dpaa2_txq)
                        stats->q_opackets[i] = dpaa2_txq->tx_pkts;

                /* Byte counting is not implemented */
                stats->q_ibytes[i]   = 0;
                stats->q_obytes[i]   = 0;
        }

        return 0;

err:
        DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
        return retcode;
}

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int32_t retcode;
        union dpni_statistics value[3] = {};
        unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

        if (n < num)
                return num;

        if (xstats == NULL)
                return 0;

        /* Get counters from page_0 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      0, 0, &value[0]);
        if (retcode)
                goto err;

        /* Get counters from page_1 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      1, 0, &value[1]);
        if (retcode)
                goto err;

        /* Get counters from page_2 */
        retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
                                      2, 0, &value[2]);
        if (retcode)
                goto err;

        for (i = 0; i < num; i++) {
                xstats[i].id = i;
                xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
                        raw.counter[dpaa2_xstats_strings[i].stats_id];
        }
        return i;
err:
        DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
        return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
                       struct rte_eth_xstat_name *xstats_names,
                       unsigned int limit)
{
        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

        if (limit < stat_cnt)
                return stat_cnt;

        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
                        strlcpy(xstats_names[i].name,
                                dpaa2_xstats_strings[i].name,
                                sizeof(xstats_names[i].name));

        return stat_cnt;
}
1441 static int
1442 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1443                        uint64_t *values, unsigned int n)
1444 {
1445         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1446         uint64_t values_copy[stat_cnt];
1447
1448         if (!ids) {
1449                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1450                 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1451                 int32_t  retcode;
1452                 union dpni_statistics value[3] = {};
1453
1454                 if (n < stat_cnt)
1455                         return stat_cnt;
1456
1457                 if (!values)
1458                         return 0;
1459
1460                 /* Get Counters from page_0 */
1461                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1462                                               0, 0, &value[0]);
1463                 if (retcode)
1464                         return 0;
1465
1466                 /* Get Counters from page_1 */
1467                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1468                                               1, 0, &value[1]);
1469                 if (retcode)
1470                         return 0;
1471
1472                 /* Get Counters from page_2 */
1473                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1474                                               2, 0, &value[2]);
1475                 if (retcode)
1476                         return 0;
1477
1478                 for (i = 0; i < stat_cnt; i++) {
1479                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1480                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1481                 }
1482                 return stat_cnt;
1483         }
1484
1485         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1486
1487         for (i = 0; i < n; i++) {
1488                 if (ids[i] >= stat_cnt) {
1489                         DPAA2_PMD_ERR("xstats id value isn't valid");
1490                         return -1;
1491                 }
1492                 values[i] = values_copy[ids[i]];
1493         }
1494         return n;
1495 }
1496
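/* Resolve extended statistic names for the given ids via a local copy
 * of the complete name table.
 */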
1497 static int
1498 dpaa2_xstats_get_names_by_id(
1499         struct rte_eth_dev *dev,
1500         struct rte_eth_xstat_name *xstats_names,
1501         const uint64_t *ids,
1502         unsigned int limit)
1503 {
1504         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1505         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1506
1507         if (!ids)
1508                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1509
1510         dpaa2_xstats_get_names(dev, xstats_names_copy, stat_cnt);
1511
1512         for (i = 0; i < limit; i++) {
1513                 if (ids[i] >= stat_cnt) {
1514                         DPAA2_PMD_ERR("xstats id value isn't valid");
1515                         return -1;
1516                 }
1517                 strlcpy(xstats_names[i].name,
                        xstats_names_copy[ids[i]].name,
                        sizeof(xstats_names[i].name));
1518         }
1519         return limit;
1520 }
1521
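/* Clear the DPNI hardware counters as well as the PMD's per-queue
 * software counters.
 */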
1522 static int
1523 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1524 {
1525         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1526         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1527         int retcode;
1528         int i;
1529         struct dpaa2_queue *dpaa2_q;
1530
1531         PMD_INIT_FUNC_TRACE();
1532
1533         if (dpni == NULL) {
1534                 DPAA2_PMD_ERR("dpni is NULL");
1535                 return -EINVAL;
1536         }
1537
1538         retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1539         if (retcode)
1540                 goto error;
1541
1542         /* Reset the per-queue stats in the dpaa2_queue structures */
1543         for (i = 0; i < priv->nb_rx_queues; i++) {
1544                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1545                 if (dpaa2_q)
1546                         dpaa2_q->rx_pkts = 0;
1547         }
1548
1549         for (i = 0; i < priv->nb_tx_queues; i++) {
1550                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1551                 if (dpaa2_q)
1552                         dpaa2_q->tx_pkts = 0;
1553         }
1554
1555         return 0;
1556
1557 error:
1558         DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
1559         return retcode;
1560 }
1561
1562 /* return 0 means link status changed, -1 means not changed */
1563 static int
1564 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1565                         int wait_to_complete __rte_unused)
1566 {
1567         int ret;
1568         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1569         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1570         struct rte_eth_link link;
1571         struct dpni_link_state state = {0};
1572
1573         if (dpni == NULL) {
1574                 DPAA2_PMD_ERR("dpni is NULL");
1575                 return 0;
1576         }
1577
1578         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1579         if (ret < 0) {
1580                 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1581                 return -1;
1582         }
1583
1584         memset(&link, 0, sizeof(struct rte_eth_link));
1585         link.link_status = state.up;
1586         link.link_speed = state.rate;
1587
1588         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1589                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1590         else
1591                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1592
1593         ret = rte_eth_linkstatus_set(dev, &link);
1594         if (ret == -1)
1595                 DPAA2_PMD_DEBUG("No change in status");
1596         else
1597                 DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1598                                link.link_status ? "Up" : "Down");
1599
1600         return ret;
1601 }
1602
1603 /**
1604  * Toggle the DPNI to enable, if not already enabled.
1605  * This is not strictly PHY up/down - it is more of a logical toggle.
1606  */
1607 static int
1608 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1609 {
1610         int ret = -EINVAL;
1611         struct dpaa2_dev_priv *priv;
1612         struct fsl_mc_io *dpni;
1613         int en = 0;
1614         struct dpni_link_state state = {0};
1615
1616         priv = dev->data->dev_private;
1617         dpni = (struct fsl_mc_io *)priv->hw;
1618
1619         if (dpni == NULL) {
1620                 DPAA2_PMD_ERR("dpni is NULL");
1621                 return ret;
1622         }
1623
1624         /* Check if DPNI is currently enabled */
1625         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1626         if (ret) {
1627                 /* Unable to obtain dpni status; not continuing */
1628                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1629                 return -EINVAL;
1630         }
1631
1632         /* Enable link if not already enabled */
1633         if (!en) {
1634                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1635                 if (ret) {
1636                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1637                         return -EINVAL;
1638                 }
1639         }
1640         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1641         if (ret < 0) {
1642                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1643                 return -1;
1644         }
1645
1646         /* Restore the tx burst function to resume enqueues */
1647         dev->tx_pkt_burst = dpaa2_dev_tx;
1648         dev->data->dev_link.link_status = state.up;
1649
1650         if (state.up)
1651                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1652         else
1653                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1654         return ret;
1655 }
1656
1657 /**
1658  * Toggle the DPNI to disable, if not already disabled.
1659  * This is not strictly PHY up/down - it is more of a logical toggle.
1660  */
1661 static int
1662 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1663 {
1664         int ret = -EINVAL;
1665         struct dpaa2_dev_priv *priv;
1666         struct fsl_mc_io *dpni;
1667         int dpni_enabled = 0;
1668         int retries = 10;
1669
1670         PMD_INIT_FUNC_TRACE();
1671
1672         priv = dev->data->dev_private;
1673         dpni = (struct fsl_mc_io *)priv->hw;
1674
1675         if (dpni == NULL) {
1676                 DPAA2_PMD_ERR("Device has not yet been configured");
1677                 return ret;
1678         }
1679
1680         /* Change the tx burst function to avoid any more enqueues */
1681         dev->tx_pkt_burst = dummy_dev_tx;
1682
1683         /* Loop while dpni_disable() attempts to drain the egress FQs
1684          * and confirm them back to us.
1685          */
1686         do {
1687                 ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1688                 if (ret) {
1689                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1690                         return ret;
1691                 }
1692                 ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &dpni_enabled);
1693                 if (ret) {
1694                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1695                         return ret;
1696                 }
1697                 if (dpni_enabled)
1698                         /* Allow the MC some slack */
1699                         rte_delay_us(100 * 1000);
1700         } while (dpni_enabled && --retries);
1701
1702         if (!retries) {
1703                 DPAA2_PMD_WARN("Retry count exceeded while disabling dpni");
1704                 /* TODO: we may have to manually clean up the queues.
1705                  */
1706         } else {
1707                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1708                                dev->data->port_id);
1709         }
1710
1711         dev->data->dev_link.link_status = 0;
1712
1713         return ret;
1714 }
1715
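/* Derive the ethdev flow control mode from the DPNI link options; the
 * PAUSE/ASYM_PAUSE combinations map onto RTE_FC_FULL, RTE_FC_RX_PAUSE,
 * RTE_FC_TX_PAUSE and RTE_FC_NONE as described in the comments below.
 */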
1716 static int
1717 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1718 {
1719         int ret = -EINVAL;
1720         struct dpaa2_dev_priv *priv;
1721         struct fsl_mc_io *dpni;
1722         struct dpni_link_state state = {0};
1723
1724         PMD_INIT_FUNC_TRACE();
1725
1726         priv = dev->data->dev_private;
1727         dpni = (struct fsl_mc_io *)priv->hw;
1728
1729         if (dpni == NULL || fc_conf == NULL) {
1730                 DPAA2_PMD_ERR("device not configured");
1731                 return ret;
1732         }
1733
1734         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1735         if (ret) {
1736                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1737                 return ret;
1738         }
1739
1740         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1741         if (state.options & DPNI_LINK_OPT_PAUSE) {
1742                 /* DPNI_LINK_OPT_PAUSE set
1743                  *  if ASYM_PAUSE not set,
1744                  *      RX Side flow control (handle received Pause frame)
1745                  *      TX side flow control (send Pause frame)
1746                  *  if ASYM_PAUSE set,
1747                  *      RX Side flow control (handle received Pause frame)
1748                  *      No TX side flow control (send Pause frame disabled)
1749                  */
1750                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1751                         fc_conf->mode = RTE_FC_FULL;
1752                 else
1753                         fc_conf->mode = RTE_FC_RX_PAUSE;
1754         } else {
1755                 /* DPNI_LINK_OPT_PAUSE not set
1756                  *  if ASYM_PAUSE set,
1757                  *      TX side flow control (send Pause frame)
1758                  *      No RX side flow control (No action on pause frame rx)
1759                  *  if ASYM_PAUSE not set,
1760                  *      Flow control disabled
1761                  */
1762                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1763                         fc_conf->mode = RTE_FC_TX_PAUSE;
1764                 else
1765                         fc_conf->mode = RTE_FC_NONE;
1766         }
1767
1768         return ret;
1769 }
1770
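/* Program the requested flow control mode into the DPNI link options.
 * The link is brought down and back up around the change because the MC
 * rejects a configuration whose rate or options conflict with the
 * current link state.
 */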
1771 static int
1772 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1773 {
1774         int ret = -EINVAL;
1775         struct dpaa2_dev_priv *priv;
1776         struct fsl_mc_io *dpni;
1777         struct dpni_link_state state = {0};
1778         struct dpni_link_cfg cfg = {0};
1779
1780         PMD_INIT_FUNC_TRACE();
1781
1782         priv = dev->data->dev_private;
1783         dpni = (struct fsl_mc_io *)priv->hw;
1784
1785         if (dpni == NULL) {
1786                 DPAA2_PMD_ERR("dpni is NULL");
1787                 return ret;
1788         }
1789
1790         /* It is necessary to obtain the current link state before setting
1791          * fc_conf, as the MC would return an error if the rate, autoneg or
1792          * duplex values differ from the current ones.
1793          */
1794         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1795         if (ret) {
1796                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
1797                 return -1;
1798         }
1799
1800         /* Disable link before setting configuration */
1801         dpaa2_dev_set_link_down(dev);
1802
1803         /* Based on fc_conf, update cfg */
1804         cfg.rate = state.rate;
1805         cfg.options = state.options;
1806
1807         /* update cfg with fc_conf */
1808         switch (fc_conf->mode) {
1809         case RTE_FC_FULL:
1810                 /* Full flow control;
1811                  * OPT_PAUSE set, ASYM_PAUSE not set
1812                  */
1813                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1814                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1815                 break;
1816         case RTE_FC_TX_PAUSE:
1817                 /* Enable TX-side flow control only
1818                  * OPT_PAUSE not set;
1819                  * ASYM_PAUSE set;
1820                  */
1821                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1822                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1823                 break;
1824         case RTE_FC_RX_PAUSE:
1825                 /* Enable RX-side flow control only
1826                  * OPT_PAUSE set
1827                  * ASYM_PAUSE set
1828                  */
1829                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1830                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1831                 break;
1832         case RTE_FC_NONE:
1833                 /* Disable Flow control
1834                  * OPT_PAUSE not set
1835                  * ASYM_PAUSE not set
1836                  */
1837                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1838                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1839                 break;
1840         default:
1841                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
1842                               fc_conf->mode);
1843                 return -1;
1844         }
1845
1846         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1847         if (ret)
1848                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
1849                               ret);
1850
1851         /* Enable link */
1852         dpaa2_dev_set_link_up(dev);
1853
1854         return ret;
1855 }
1856
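/* Enable or disable RSS: a non-zero rss_hf sets up flow distribution
 * accordingly, while a zero value removes it.
 */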
1857 static int
1858 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1859                           struct rte_eth_rss_conf *rss_conf)
1860 {
1861         struct rte_eth_dev_data *data = dev->data;
1862         struct rte_eth_conf *eth_conf = &data->dev_conf;
1863         int ret;
1864
1865         PMD_INIT_FUNC_TRACE();
1866
1867         if (rss_conf->rss_hf) {
1868                 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1869                 if (ret) {
1870                         DPAA2_PMD_ERR("Unable to set flow dist");
1871                         return ret;
1872                 }
1873         } else {
1874                 ret = dpaa2_remove_flow_dist(dev, 0);
1875                 if (ret) {
1876                         DPAA2_PMD_ERR("Unable to remove flow dist");
1877                         return ret;
1878                 }
1879         }
1880         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1881         return 0;
1882 }
1883
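/* Report the currently configured RSS hash functions; dpaa2 has no
 * programmable RSS key, so the key length is always reported as 0.
 */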
1884 static int
1885 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1886                             struct rte_eth_rss_conf *rss_conf)
1887 {
1888         struct rte_eth_dev_data *data = dev->data;
1889         struct rte_eth_conf *eth_conf = &data->dev_conf;
1890
1891         /* dpaa2 does not support rss_key, so length should be 0 */
1892         rss_conf->rss_key_len = 0;
1893         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1894         return 0;
1895 }
1896
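/* Attach an Rx queue to a DPCON object for the eventdev Rx adapter.
 * The dequeue callback is selected from the scheduling type (parallel,
 * atomic or ordered), and an order restoration point is created the
 * first time ordered mode is requested.
 */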
1897 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
1898                 int eth_rx_queue_id,
1899                 uint16_t dpcon_id,
1900                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1901 {
1902         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1903         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1904         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1905         uint8_t flow_id = dpaa2_ethq->flow_id;
1906         struct dpni_queue cfg;
1907         uint8_t options;
1908         int ret;
1909
1910         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
1911                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
1912         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
1913                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
1914         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
1915                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
1916         else
1917                 return -EINVAL;
1918
1919         memset(&cfg, 0, sizeof(struct dpni_queue));
1920         options = DPNI_QUEUE_OPT_DEST;
1921         cfg.destination.type = DPNI_DEST_DPCON;
1922         cfg.destination.id = dpcon_id;
1923         cfg.destination.priority = queue_conf->ev.priority;
1924
1925         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
1926                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
1927                 cfg.destination.hold_active = 1;
1928         }
1929
1930         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
1931                         !eth_priv->en_ordered) {
1932                 struct opr_cfg ocfg;
1933
1934                 /* Restoration window size = 256 frames */
1935                 ocfg.oprrws = 3;
1936                 /* Restoration window size = 512 frames for LX2 */
1937                 if (dpaa2_svr_family == SVR_LX2160A)
1938                         ocfg.oprrws = 4;
1939                 /* Auto advance NESN window enabled */
1940                 ocfg.oa = 1;
1941                 /* Late arrival window size disabled */
1942                 ocfg.olws = 0;
1943                 /* ORL resource exhaustion advance NESN disabled */
1944                 ocfg.oeane = 0;
1945                 /* Loose ordering enabled */
1946                 ocfg.oloe = 1;
1947                 eth_priv->en_loose_ordered = 1;
1948                 /* Strict ordering enabled if explicitly set */
1949                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
1950                         ocfg.oloe = 0;
1951                         eth_priv->en_loose_ordered = 0;
1952                 }
1953
1954                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
1955                                    dpaa2_ethq->tc_index, flow_id,
1956                                    OPR_OPT_CREATE, &ocfg);
1957                 if (ret) {
1958                         DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
1959                         return ret;
1960                 }
1961
1962                 eth_priv->en_ordered = 1;
1963         }
1964
1965         options |= DPNI_QUEUE_OPT_USER_CTX;
1966         cfg.user_context = (size_t)(dpaa2_ethq);
1967
1968         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1969                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1970         if (ret) {
1971                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1972                 return ret;
1973         }
1974
1975         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
1976
1977         return 0;
1978 }
1979
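/* Detach the Rx queue from its DPCON by resetting the queue destination. */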
1980 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
1981                 int eth_rx_queue_id)
1982 {
1983         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1984         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1985         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1986         uint8_t flow_id = dpaa2_ethq->flow_id;
1987         struct dpni_queue cfg;
1988         uint8_t options;
1989         int ret;
1990
1991         memset(&cfg, 0, sizeof(struct dpni_queue));
1992         options = DPNI_QUEUE_OPT_DEST;
1993         cfg.destination.type = DPNI_DEST_NONE;
1994
1995         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1996                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1997         if (ret)
1998                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1999
2000         return ret;
2001 }
2002
2003 static inline int
2004 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
2005 {
2006         unsigned int i;
2007
2008         for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
2009                 if (dpaa2_supported_filter_ops[i] == filter_op)
2010                         return 0;
2011         }
2012         return -ENOTSUP;
2013 }
2014
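/* Entry point for the generic filter API (.filter_ctrl). Only
 * RTE_ETH_FILTER_GENERIC is supported, for which the rte_flow ops
 * table is returned through arg.
 */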
2015 static int
2016 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
2017                     enum rte_filter_type filter_type,
2018                     enum rte_filter_op filter_op,
2019                     void *arg)
2020 {
2021         int ret = 0;
2022
2023         if (!dev)
2024                 return -ENODEV;
2025
2026         switch (filter_type) {
2027         case RTE_ETH_FILTER_GENERIC:
2028                 if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
2029                         ret = -ENOTSUP;
2030                         break;
2031                 }
2032                 *(const void **)arg = &dpaa2_flow_ops;
2033                 dpaa2_filter_type |= filter_type;
2034                 break;
2035         default:
2036                 RTE_LOG(ERR, PMD, "Filter type (%d) not supported\n",
2037                         filter_type);
2038                 ret = -ENOTSUP;
2039                 break;
2040         }
2041         return ret;
2042 }
2043
2044 static struct eth_dev_ops dpaa2_ethdev_ops = {
2045         .dev_configure    = dpaa2_eth_dev_configure,
2046         .dev_start            = dpaa2_dev_start,
2047         .dev_stop             = dpaa2_dev_stop,
2048         .dev_close            = dpaa2_dev_close,
2049         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2050         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2051         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2052         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2053         .dev_set_link_up      = dpaa2_dev_set_link_up,
2054         .dev_set_link_down    = dpaa2_dev_set_link_down,
2055         .link_update       = dpaa2_dev_link_update,
2056         .stats_get             = dpaa2_dev_stats_get,
2057         .xstats_get            = dpaa2_dev_xstats_get,
2058         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
2059         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2060         .xstats_get_names      = dpaa2_xstats_get_names,
2061         .stats_reset       = dpaa2_dev_stats_reset,
2062         .xstats_reset         = dpaa2_dev_stats_reset,
2063         .fw_version_get    = dpaa2_fw_version_get,
2064         .dev_infos_get     = dpaa2_dev_info_get,
2065         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2066         .mtu_set           = dpaa2_dev_mtu_set,
2067         .vlan_filter_set      = dpaa2_vlan_filter_set,
2068         .vlan_offload_set     = dpaa2_vlan_offload_set,
2069         .vlan_tpid_set        = dpaa2_vlan_tpid_set,
2070         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2071         .rx_queue_release  = dpaa2_dev_rx_queue_release,
2072         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2073         .tx_queue_release  = dpaa2_dev_tx_queue_release,
2074         .rx_queue_count       = dpaa2_dev_rx_queue_count,
2075         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
2076         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
2077         .mac_addr_add         = dpaa2_dev_add_mac_addr,
2078         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2079         .mac_addr_set         = dpaa2_dev_set_mac_addr,
2080         .rss_hash_update      = dpaa2_dev_rss_hash_update,
2081         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2082         .filter_ctrl          = dpaa2_dev_flow_ctrl,
2083 };
2084
2085 /* Populate the MAC address from the physical device (u-boot/firmware) and/or
2086  * the one set by higher layers like MC (restool).
2087  * The resolved prime MAC is copied into mac_entry; returns 0 on success.
2088  */
2089 static int
2090 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2091                   struct rte_ether_addr *mac_entry)
2092 {
2093         int ret;
2094         struct rte_ether_addr phy_mac, prime_mac;
2095
2096         memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2097         memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2098
2099         /* Get the physical device MAC address */
2100         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2101                                      phy_mac.addr_bytes);
2102         if (ret) {
2103                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2104                 goto cleanup;
2105         }
2106
2107         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2108                                         prime_mac.addr_bytes);
2109         if (ret) {
2110                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2111                 goto cleanup;
2112         }
2113
2114         /* Now that both MACs have been obtained:
2115          *  if phy MAC is non-empty and phy != prime, overwrite prime with phy
2116          *     and return phy;
2117          *  if phy MAC is empty, return prime;
2118          *  if both are empty, create a random MAC, set it as prime and return.
2119          */
2120         if (!rte_is_zero_ether_addr(&phy_mac)) {
2121                 /* If the addresses are not same, overwrite prime */
2122                 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2123                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2124                                                         priv->token,
2125                                                         phy_mac.addr_bytes);
2126                         if (ret) {
2127                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2128                                               ret);
2129                                 goto cleanup;
2130                         }
2131                         memcpy(&prime_mac, &phy_mac,
2132                                 sizeof(struct rte_ether_addr));
2133                 }
2134         } else if (rte_is_zero_ether_addr(&prime_mac)) {
2135                 /* If both phy and prime MACs are zero, create a random MAC */
2136                 rte_eth_random_addr(prime_mac.addr_bytes);
2137                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2138                                                 priv->token,
2139                                                 prime_mac.addr_bytes);
2140                 if (ret) {
2141                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2142                         goto cleanup;
2143                 }
2144         }
2145
2146         /* prime_mac is the final MAC address to report */
2147         memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2148         return 0;
2149
2150 cleanup:
2151         return -1;
2152 }
2153
2154 static int
2155 check_devargs_handler(__rte_unused const char *key, const char *value,
2156                       __rte_unused void *opaque)
2157 {
2158         if (strcmp(value, "1"))
2159                 return -1;
2160
2161         return 0;
2162 }
2163
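/* Return 1 if the given key is present in the device arguments and set
 * to "1" (e.g. a devargs string containing "drv_loopback=1"), else 0.
 */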
2164 static int
2165 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2166 {
2167         struct rte_kvargs *kvlist;
2168
2169         if (!devargs)
2170                 return 0;
2171
2172         kvlist = rte_kvargs_parse(devargs->args, NULL);
2173         if (!kvlist)
2174                 return 0;
2175
2176         if (!rte_kvargs_count(kvlist, key)) {
2177                 rte_kvargs_free(kvlist);
2178                 return 0;
2179         }
2180
2181         if (rte_kvargs_process(kvlist, key,
2182                                check_devargs_handler, NULL) < 0) {
2183                 rte_kvargs_free(kvlist);
2184                 return 0;
2185         }
2186         rte_kvargs_free(kvlist);
2187
2188         return 1;
2189 }
2190
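/* Initialize the ethdev: open and reset the DPNI, size the Rx/Tx queues
 * from the DPNI attributes, populate the MAC address table, configure
 * the buffer layouts and select the Rx/Tx burst functions.
 */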
2191 static int
2192 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2193 {
2194         struct rte_device *dev = eth_dev->device;
2195         struct rte_dpaa2_device *dpaa2_dev;
2196         struct fsl_mc_io *dpni_dev;
2197         struct dpni_attr attr;
2198         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2199         struct dpni_buffer_layout layout;
2200         int ret, hw_id, i;
2201
2202         PMD_INIT_FUNC_TRACE();
2203
2204         /* For secondary processes, the primary has done all the work */
2205         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2206                 /* In case of a secondary process, only the burst and ops
2207                  * APIs need to be plugged.
2208                  */
2209                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2210                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2211                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2212                 else if (dpaa2_get_devargs(dev->devargs,
2213                                         DRIVER_NO_PREFETCH_MODE))
2214                         eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2215                 else
2216                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2217                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2218                 return 0;
2219         }
2220
2221         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2222
2223         hw_id = dpaa2_dev->object_id;
2224
2225         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2226         if (!dpni_dev) {
2227                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2228                 return -1;
2229         }
2230
2231         dpni_dev->regs = rte_mcp_ptr_list[0];
2232         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2233         if (ret) {
2234                 DPAA2_PMD_ERR(
2235                              "Failure in opening dpni@%d with err code %d",
2236                              hw_id, ret);
2237                 rte_free(dpni_dev);
2238                 return -1;
2239         }
2240
2241         /* Clean the device first */
2242         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2243         if (ret) {
2244                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2245                               hw_id, ret);
2246                 goto init_err;
2247         }
2248
2249         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2250         if (ret) {
2251                 DPAA2_PMD_ERR(
2252                              "Failure in getting dpni@%d attributes, err code %d",
2253                              hw_id, ret);
2254                 goto init_err;
2255         }
2256
2257         priv->num_rx_tc = attr.num_rx_tcs;
2258         /* CGs are available only if the custom CG option is enabled */
2259         if (attr.options & DPNI_OPT_CUSTOM_CG)
2260                 priv->max_cgs = attr.num_cgs;
2261         else
2262                 priv->max_cgs = 0;
2263
2264         for (i = 0; i < priv->max_cgs; i++)
2265                 priv->cgid_in_use[i] = 0;
2266
2267         for (i = 0; i < attr.num_rx_tcs; i++)
2268                 priv->nb_rx_queues += attr.num_queues;
2269
2270         /* Use the number of TX TCs as the number of TX queues */
2271         priv->nb_tx_queues = attr.num_tx_tcs;
2272
2273         DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2274                         priv->num_rx_tc, priv->nb_rx_queues,
2275                         priv->nb_tx_queues, priv->max_cgs);
2276
2277         priv->hw = dpni_dev;
2278         priv->hw_id = hw_id;
2279         priv->options = attr.options;
2280         priv->max_mac_filters = attr.mac_filter_entries;
2281         priv->max_vlan_filters = attr.vlan_filter_entries;
2282         priv->flags = 0;
2283
2284         /* Allocate memory for hardware structure for queues */
2285         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2286         if (ret) {
2287                 DPAA2_PMD_ERR("Queue allocation Failed");
2288                 goto init_err;
2289         }
2290
2291         /* Allocate memory for storing MAC addresses.
2292          * Table of mac_filter_entries size is allocated so that RTE ether lib
2293          * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2294          */
2295         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2296                 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2297         if (eth_dev->data->mac_addrs == NULL) {
2298                 DPAA2_PMD_ERR(
2299                    "Failed to allocate %d bytes needed to store MAC addresses",
2300                    RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2301                 ret = -ENOMEM;
2302                 goto init_err;
2303         }
2304
2305         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2306         if (ret) {
2307                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2308                 rte_free(eth_dev->data->mac_addrs);
2309                 eth_dev->data->mac_addrs = NULL;
2310                 goto init_err;
2311         }
2312
2313         /* Configure the TX buffer layout */
2314         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2315         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2316         layout.pass_frame_status = 1;
2317         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2318                                      DPNI_QUEUE_TX, &layout);
2319         if (ret) {
2320                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2321                 goto init_err;
2322         }
2323
2324         /* Configure the TX-confirmation and error buffer layout */
2325         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2326         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2327         layout.pass_frame_status = 1;
2328         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2329                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2330         if (ret) {
2331                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2332                              ret);
2333                 goto init_err;
2334         }
2335
2336         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2337
2338         if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2339                 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2340                 DPAA2_PMD_INFO("Loopback mode");
2341         } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2342                 eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2343                 DPAA2_PMD_INFO("No Prefetch mode");
2344         } else {
2345                 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2346         }
2347         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2348
2349         /* Init fields w.r.t. classification */
2350         memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
2351         priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2352         if (!priv->extract.qos_extract_param) {
2353                 DPAA2_PMD_ERR("Failure in allocating memory for flow classification");
2354                 ret = -ENOMEM;
2355                 goto init_err;
2356         }
2357         for (i = 0; i < MAX_TCS; i++) {
2358                 memset(&priv->extract.fs_key_cfg[i], 0,
2359                         sizeof(struct dpkg_profile_cfg));
2360                 priv->extract.fs_extract_param[i] =
2361                         (size_t)rte_malloc(NULL, 256, 64);
2362                 if (!priv->extract.fs_extract_param[i]) {
2363                         DPAA2_PMD_ERR("Failure in allocating memory for flow classification");
2364                         ret = -ENOMEM;
2365                         goto init_err;
2366                 }
2367         }
2368
2369         RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2370         return 0;
2371 init_err:
2372         dpaa2_dev_uninit(eth_dev);
2373         return ret;
2374 }
2375
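/* Undo dpaa2_dev_init: close the device, release the queues, the DPNI
 * handle and the classification extract buffers.
 */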
2376 static int
2377 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2378 {
2379         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2380         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2381         int i, ret;
2382
2383         PMD_INIT_FUNC_TRACE();
2384
2385         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2386                 return 0;
2387
2388         if (!dpni) {
2389                 DPAA2_PMD_WARN("Already closed or not started");
2390                 return -1;
2391         }
2392
2393         dpaa2_dev_close(eth_dev);
2394
2395         dpaa2_free_rx_tx_queues(eth_dev);
2396
2397         /* Close the device at the underlying layer */
2398         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2399         if (ret) {
2400                 DPAA2_PMD_ERR(
2401                              "Failure closing dpni device with err code %d",
2402                              ret);
2403         }
2404
2405         /* Free the allocated memory for ethernet private data and dpni */
2406         priv->hw = NULL;
2407         rte_free(dpni);
2408
2409         for (i = 0; i < MAX_TCS; i++) {
2410                 if (priv->extract.fs_extract_param[i])
2411                         rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
2412         }
2413
2414         if (priv->extract.qos_extract_param)
2415                 rte_free((void *)(size_t)priv->extract.qos_extract_param);
2416
2417         eth_dev->dev_ops = NULL;
2418         eth_dev->rx_pkt_burst = NULL;
2419         eth_dev->tx_pkt_burst = NULL;
2420
2421         DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2422         return 0;
2423 }
2424
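/* Probe callback for the fslmc bus: allocate (or, in a secondary
 * process, attach to) the ethdev, check that the mbuf headroom can hold
 * the DPAA2 annotation area, and run the device initialization.
 */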
2425 static int
2426 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2427                 struct rte_dpaa2_device *dpaa2_dev)
2428 {
2429         struct rte_eth_dev *eth_dev;
2430         int diag;
2431
2432         if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2433                 RTE_PKTMBUF_HEADROOM) {
2434                 DPAA2_PMD_ERR(
2435                 "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
2436                 RTE_PKTMBUF_HEADROOM,
2437                 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2438
2439                 return -1;
2440         }
2441
2442         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2443                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2444                 if (!eth_dev)
2445                         return -ENODEV;
2446                 eth_dev->data->dev_private = rte_zmalloc(
2447                                                 "ethdev private structure",
2448                                                 sizeof(struct dpaa2_dev_priv),
2449                                                 RTE_CACHE_LINE_SIZE);
2450                 if (eth_dev->data->dev_private == NULL) {
2451                         DPAA2_PMD_CRIT(
2452                                 "Unable to allocate memory for private data");
2453                         rte_eth_dev_release_port(eth_dev);
2454                         return -ENOMEM;
2455                 }
2456         } else {
2457                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2458                 if (!eth_dev)
2459                         return -ENODEV;
2460         }
2461
2462         eth_dev->device = &dpaa2_dev->device;
2463
2464         dpaa2_dev->eth_dev = eth_dev;
2465         eth_dev->data->rx_mbuf_alloc_failed = 0;
2466
2467         if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2468                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2469
2470         /* Invoke PMD device initialization function */
2471         diag = dpaa2_dev_init(eth_dev);
2472         if (diag == 0) {
2473                 rte_eth_dev_probing_finish(eth_dev);
2474                 return 0;
2475         }
2476
2477         rte_eth_dev_release_port(eth_dev);
2478         return diag;
2479 }
2480
2481 static int
2482 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2483 {
2484         struct rte_eth_dev *eth_dev;
2485
2486         eth_dev = dpaa2_dev->eth_dev;
2487         dpaa2_dev_uninit(eth_dev);
2488
2489         rte_eth_dev_release_port(eth_dev);
2490
2491         return 0;
2492 }
2493
2494 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2495         .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2496         .drv_type = DPAA2_ETH,
2497         .probe = rte_dpaa2_probe,
2498         .remove = rte_dpaa2_remove,
2499 };
2500
2501 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2502 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2503                 DRIVER_LOOPBACK_MODE "=<int> "
2504                 DRIVER_NO_PREFETCH_MODE "=<int>");
2505 RTE_INIT(dpaa2_pmd_init_log)
2506 {
2507         dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
2508         if (dpaa2_logtype_pmd >= 0)
2509                 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
2510 }