net/dpaa2: enable Rx offload for timestamp
drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_CHECKSUM |
		DEV_RX_OFFLOAD_SCTP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS;

/* enable timestamp in mbuf */
enum pmd_dpaa2_ts dpaa2_enable_ts;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

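/*
 * Each entry maps a user-visible xstats name to a DPNI statistics page id
 * and the counter index within that page, as reported by
 * dpni_get_statistics(); see dpaa2_dev_xstats_get() below.
 */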
static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

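/* Flow (filter) operations accepted by this PMD's filter control path */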
static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
	RTE_ETH_FILTER_ADD,
	RTE_ETH_FILTER_DELETE,
	RTE_ETH_FILTER_UPDATE,
	RTE_ETH_FILTER_FLUSH,
	RTE_ETH_FILTER_GET
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
	dpaa2_enable_ts = enable;
}
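
/*
 * Usage sketch (illustrative only, not part of this driver; "port_id" and
 * the single queue pair are assumed, and the enum values are as declared
 * in rte_pmd_dpaa2.h): Rx timestamping can be requested either via the
 * generic offload flag at configure time or directly via the PMD-specific
 * API above, e.g.:
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	// or, independently of the offload flag:
 *	rte_pmd_dpaa2_set_timestamp(PMD_DPAA2_ENABLE_TS);
 */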

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type __rte_unused,
		      uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if a custom TPID is already configured, remove it first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

	return 0;
}

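/*
 * Allocate a single contiguous array of dpaa2_queue structures covering
 * all Rx and Tx queues; priv->rx_vq[] and priv->tx_vq[] point into it.
 * Each Rx queue additionally gets dequeue result storage (q_storage) for
 * QBMan pull operations, and each Tx queue gets a CSCN area used for
 * congestion notifications.
 */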
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (Rx + Tx) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads which are enabled by default */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_INFO(
		"Some Rx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads which are enabled by default */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_INFO(
		"Some Tx offloads enabled by default - requested 0x%" PRIx64
		" fixed are 0x%" PRIx64,
		tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set MTU. Check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution. "
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Rx L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Rx L4 csum: err = %d", ret);
		return ret;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		dpaa2_enable_ts = true;

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Tx L3 csum: err = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error setting Tx L4 csum: err = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
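	/*
	 * Cache the mempool and the bpid table on the queue; the Rx path
	 * uses them to map the buffer pool id carried in each frame
	 * descriptor back to its rte_mempool.
	 */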
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if this is an ls2088 or a rev2 device, enable stashing */

	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The low 6 bits of FLC select stashing, two bits per
		 * setting, in the order DS (data), AS (annotation),
		 * CS (context). 01 01 00 (0x14) enables 1 line of data
		 * and 1 line of annotation stashing.
		 * For LX2, this setting should be 01 00 00 (0x10),
		 * i.e. data stashing only.
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: err = %d", ret);
		return -1;
	}

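	/*
	 * Unless taildrop has been disabled for this port, arm a per-queue,
	 * byte-based taildrop threshold so a congested Rx queue drops
	 * excess frames instead of exhausting the buffer pool.
	 */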
	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per-Rx-queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

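/* Set up a Tx flow. Each Tx queue maps to its own traffic class
 * (tc_id == tx_queue_id) with flow id 0. Queue 0 additionally disables
 * Tx confirmations, and unless DPAA2_TX_CGR_OFF is set each queue arms
 * frame-based congestion notification backed by its CSCN memory.
 */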
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

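		/*
		 * The hardware writes a congestion state change notification
		 * (CSCN) into dpaa2_q->cscn when the queue crosses the
		 * entry/exit thresholds; the Tx path checks this memory to
		 * back off while the queue is congested.
		 */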
		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;
		cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

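/* Return the number of frames currently pending on the given Rx queue,
 * using a QBMan frame queue state query.
 */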
static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
		return ptypes;
	return NULL;
}

/**
 * Dpaa2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

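/*
 * Device start: enable the DPNI and bring the link up, cache the Tx qdid
 * and the Rx FQIDs for the fast path, route errored frames (with the
 * error recorded in the annotation) to the normal path, and hook up the
 * LSC interrupt if the application requested it.
 */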
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg	err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				       dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* On checksum errors, send the frames to the normal path and mark
	 * the errors in the frame annotation.
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
	err_cfg.errors |= DPNI_ERROR_PHE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	dpaa2_flow_clean(dev);

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static int
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);

	return ret;
}

static int
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}

	return ret;
}

static int
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);

	return ret;
}

static int
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -ENODEV;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return 0;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);

	return ret;
}

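/*
 * Set the MTU. The maximum frame length programmed into the DPNI is the
 * MTU plus the Ethernet header, the CRC and one VLAN tag; the jumbo frame
 * offload flag is kept in sync with the resulting frame size.
 */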
static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
		return -EINVAL;

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct rte_ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return 0;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct rte_ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct rte_ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t  retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i]   = 0;
		stats->q_obytes[i]   = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return retcode;
}

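/*
 * Extended stats: the counters live in three DPNI statistics pages (see
 * dpaa2_xstats_strings above). Per ethdev convention, a call with
 * n < num only reports the required array size without filling anything.
 */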
1296 static int
1297 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1298                      unsigned int n)
1299 {
1300         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1301         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1302         int32_t  retcode;
1303         union dpni_statistics value[3] = {};
1304         unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1305
1306         if (n < num)
1307                 return num;
1308
1309         if (xstats == NULL)
1310                 return 0;
1311
1312         /* Get Counters from page_0*/
1313         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1314                                       0, 0, &value[0]);
1315         if (retcode)
1316                 goto err;
1317
1318         /* Get Counters from page_1*/
1319         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1320                                       1, 0, &value[1]);
1321         if (retcode)
1322                 goto err;
1323
1324         /* Get Counters from page_2*/
1325         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1326                                       2, 0, &value[2]);
1327         if (retcode)
1328                 goto err;
1329
1330         for (i = 0; i < num; i++) {
1331                 xstats[i].id = i;
1332                 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1333                         raw.counter[dpaa2_xstats_strings[i].stats_id];
1334         }
1335         return i;
1336 err:
1337         DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1338         return retcode;
1339 }
1340
1341 static int
1342 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1343                        struct rte_eth_xstat_name *xstats_names,
1344                        unsigned int limit)
1345 {
1346         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1347
1348         if (limit < stat_cnt)
1349                 return stat_cnt;
1350
1351         if (xstats_names != NULL)
1352                 for (i = 0; i < stat_cnt; i++)
1353                         strlcpy(xstats_names[i].name,
1354                                 dpaa2_xstats_strings[i].name,
1355                                 sizeof(xstats_names[i].name));
1356
1357         return stat_cnt;
1358 }
1359
1360 static int
1361 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1362                        uint64_t *values, unsigned int n)
1363 {
1364         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1365         uint64_t values_copy[stat_cnt];
1366
1367         if (!ids) {
1368                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1369                 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1370                 int32_t  retcode;
1371                 union dpni_statistics value[3] = {};
1372
1373                 if (n < stat_cnt)
1374                         return stat_cnt;
1375
1376                 if (!values)
1377                         return 0;
1378
1379                 /* Get Counters from page_0*/
1380                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1381                                               0, 0, &value[0]);
1382                 if (retcode)
1383                         return 0;
1384
1385                 /* Get Counters from page_1*/
1386                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1387                                               1, 0, &value[1]);
1388                 if (retcode)
1389                         return 0;
1390
1391                 /* Get Counters from page_2*/
1392                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1393                                               2, 0, &value[2]);
1394                 if (retcode)
1395                         return 0;
1396
1397                 for (i = 0; i < stat_cnt; i++) {
1398                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1399                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1400                 }
1401                 return stat_cnt;
1402         }
1403
1404         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1405
1406         for (i = 0; i < n; i++) {
1407                 if (ids[i] >= stat_cnt) {
1408                         DPAA2_PMD_ERR("xstats id value isn't valid");
1409                         return -1;
1410                 }
1411                 values[i] = values_copy[ids[i]];
1412         }
1413         return n;
1414 }
1415
1416 static int
1417 dpaa2_xstats_get_names_by_id(
1418         struct rte_eth_dev *dev,
1419         struct rte_eth_xstat_name *xstats_names,
1420         const uint64_t *ids,
1421         unsigned int limit)
1422 {
1423         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1424         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1425
1426         if (!ids)
1427                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1428
1429         dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1430
1431         for (i = 0; i < limit; i++) {
1432                 if (ids[i] >= stat_cnt) {
1433                         DPAA2_PMD_ERR("xstats id value isn't valid");
1434                         return -1;
1435                 }
1436                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1437         }
1438         return limit;
1439 }
1440
1441 static int
1442 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1443 {
1444         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1445         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1446         int retcode;
1447         int i;
1448         struct dpaa2_queue *dpaa2_q;
1449
1450         PMD_INIT_FUNC_TRACE();
1451
1452         if (dpni == NULL) {
1453                 DPAA2_PMD_ERR("dpni is NULL");
1454                 return -EINVAL;
1455         }
1456
1457         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1458         if (retcode)
1459                 goto error;
1460
1461         /* Reset the per queue stats in dpaa2_queue structure */
1462         for (i = 0; i < priv->nb_rx_queues; i++) {
1463                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1464                 if (dpaa2_q)
1465                         dpaa2_q->rx_pkts = 0;
1466         }
1467
1468         for (i = 0; i < priv->nb_tx_queues; i++) {
1469                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1470                 if (dpaa2_q)
1471                         dpaa2_q->tx_pkts = 0;
1472         }
1473
1474         return 0;
1475
1476 error:
1477         DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
1478         return retcode;
1479 };
1480
1481 /* return 0 means link status changed, -1 means not changed */
1482 static int
1483 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1484                         int wait_to_complete __rte_unused)
1485 {
1486         int ret;
1487         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1488         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1489         struct rte_eth_link link;
1490         struct dpni_link_state state = {0};
1491
1492         if (dpni == NULL) {
1493                 DPAA2_PMD_ERR("dpni is NULL");
1494                 return 0;
1495         }
1496
1497         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1498         if (ret < 0) {
1499                 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1500                 return -1;
1501         }
1502
1503         memset(&link, 0, sizeof(struct rte_eth_link));
1504         link.link_status = state.up;
1505         link.link_speed = state.rate;
1506
1507         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1508                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1509         else
1510                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1511
1512         ret = rte_eth_linkstatus_set(dev, &link);
1513         if (ret == -1)
1514                 DPAA2_PMD_DEBUG("No change in status");
1515         else
1516                 DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1517                                link.link_status ? "Up" : "Down");
1518
1519         return ret;
1520 }
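
/* rte_eth_linkstatus_set() swaps the new link word into dev->data
 * atomically and, as noted above, returns 0 when the status changed and
 * -1 when it did not, which is why its return value is forwarded as-is.
 * A minimal application-side sketch (port_id is illustrative):
 *
 *   struct rte_eth_link link;
 *
 *   rte_eth_link_get_nowait(port_id, &link);
 *   printf("link is %s\n", link.link_status ? "up" : "down");
 */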
1521
1522 /**
1523  * Enable the DPNI if it is not already enabled.
1524  * This is not strictly PHY up/down - it is more of a logical toggle.
1525  */
1526 static int
1527 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1528 {
1529         int ret = -EINVAL;
1530         struct dpaa2_dev_priv *priv;
1531         struct fsl_mc_io *dpni;
1532         int en = 0;
1533         struct dpni_link_state state = {0};
1534
1535         priv = dev->data->dev_private;
1536         dpni = (struct fsl_mc_io *)priv->hw;
1537
1538         if (dpni == NULL) {
1539                 DPAA2_PMD_ERR("dpni is NULL");
1540                 return ret;
1541         }
1542
1543         /* Check if DPNI is currently enabled */
1544         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1545         if (ret) {
1546                 /* Unable to obtain dpni status; Not continuing */
1547                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1548                 return -EINVAL;
1549         }
1550
1551         /* Enable link if not already enabled */
1552         if (!en) {
1553                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1554                 if (ret) {
1555                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1556                         return -EINVAL;
1557                 }
1558         }
1559         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1560         if (ret < 0) {
1561                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1562                 return -1;
1563         }
1564
1565         /* Changing tx burst function to start enqueues */
1566         dev->tx_pkt_burst = dpaa2_dev_tx;
1567         dev->data->dev_link.link_status = state.up;
1568
1569         if (state.up)
1570                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1571         else
1572                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1573         return ret;
1574 }
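
/* Note that set_link_up/set_link_down also gate the data path: link down
 * points dev->tx_pkt_burst at dummy_dev_tx so no further enqueues reach
 * the hardware, and link up restores dpaa2_dev_tx (see
 * dpaa2_dev_set_link_down() below).
 */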
1575
1576 /**
1577  * Disable the DPNI if it is not already disabled.
1578  * This is not strictly PHY up/down - it is more of a logical toggle.
1579  */
1580 static int
1581 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1582 {
1583         int ret = -EINVAL;
1584         struct dpaa2_dev_priv *priv;
1585         struct fsl_mc_io *dpni;
1586         int dpni_enabled = 0;
1587         int retries = 10;
1588
1589         PMD_INIT_FUNC_TRACE();
1590
1591         priv = dev->data->dev_private;
1592         dpni = (struct fsl_mc_io *)priv->hw;
1593
1594         if (dpni == NULL) {
1595                 DPAA2_PMD_ERR("Device has not yet been configured");
1596                 return ret;
1597         }
1598
1599         /* Changing tx burst function to avoid any more enqueues */
1600         dev->tx_pkt_burst = dummy_dev_tx;
1601
1602         /* Loop while dpni_disable() attempts to drain the egress FQs
1603          * and confirm them back to us.
1604          */
1605         do {
1606                 ret = dpni_disable(dpni, 0, priv->token);
1607                 if (ret) {
1608                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1609                         return ret;
1610                 }
1611                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1612                 if (ret) {
1613                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1614                         return ret;
1615                 }
1616                 if (dpni_enabled)
1617                         /* Allow the MC some slack */
1618                         rte_delay_us(100 * 1000);
1619         } while (dpni_enabled && --retries);
1620
1621         if (!retries) {
1622                 DPAA2_PMD_WARN("Retry count exceeded while disabling dpni");
1623                 /* TODO: we may have to manually clean up queues.
1624                  */
1625         } else {
1626                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1627                                dev->data->port_id);
1628         }
1629
1630         dev->data->dev_link.link_status = 0;
1631
1632         return ret;
1633 }
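
/* With 10 retries and a 100 ms delay per retry, the drain loop above
 * waits at most about one second for the MC to report the DPNI disabled
 * before giving up with a warning.
 */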
1634
1635 static int
1636 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1637 {
1638         int ret = -EINVAL;
1639         struct dpaa2_dev_priv *priv;
1640         struct fsl_mc_io *dpni;
1641         struct dpni_link_state state = {0};
1642
1643         PMD_INIT_FUNC_TRACE();
1644
1645         priv = dev->data->dev_private;
1646         dpni = (struct fsl_mc_io *)priv->hw;
1647
1648         if (dpni == NULL || fc_conf == NULL) {
1649                 DPAA2_PMD_ERR("device not configured");
1650                 return ret;
1651         }
1652
1653         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1654         if (ret) {
1655                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1656                 return ret;
1657         }
1658
1659         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1660         if (state.options & DPNI_LINK_OPT_PAUSE) {
1661                 /* DPNI_LINK_OPT_PAUSE set
1662                  *  if ASYM_PAUSE not set,
1663                  *      RX Side flow control (handle received Pause frame)
1664                  *      TX side flow control (send Pause frame)
1665                  *  if ASYM_PAUSE set,
1666                  *      RX Side flow control (handle received Pause frame)
1667                  *      No TX side flow control (send Pause frame disabled)
1668                  */
1669                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1670                         fc_conf->mode = RTE_FC_FULL;
1671                 else
1672                         fc_conf->mode = RTE_FC_RX_PAUSE;
1673         } else {
1674                 /* DPNI_LINK_OPT_PAUSE not set
1675                  *  if ASYM_PAUSE set,
1676                  *      TX side flow control (send Pause frame)
1677                  *      No RX side flow control (No action on pause frame rx)
1678                  *  if ASYM_PAUSE not set,
1679                  *      Flow control disabled
1680                  */
1681                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1682                         fc_conf->mode = RTE_FC_TX_PAUSE;
1683                 else
1684                         fc_conf->mode = RTE_FC_NONE;
1685         }
1686
1687         return ret;
1688 }
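
/* Summary of the DPNI link-option to RTE_FC mode mapping implemented in
 * dpaa2_flow_ctrl_get() above and dpaa2_flow_ctrl_set() below:
 *
 *   OPT_PAUSE  OPT_ASYM_PAUSE  =>  mode
 *       1             0            RTE_FC_FULL
 *       1             1            RTE_FC_RX_PAUSE
 *       0             1            RTE_FC_TX_PAUSE
 *       0             0            RTE_FC_NONE
 */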
1689
1690 static int
1691 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1692 {
1693         int ret = -EINVAL;
1694         struct dpaa2_dev_priv *priv;
1695         struct fsl_mc_io *dpni;
1696         struct dpni_link_state state = {0};
1697         struct dpni_link_cfg cfg = {0};
1698
1699         PMD_INIT_FUNC_TRACE();
1700
1701         priv = dev->data->dev_private;
1702         dpni = (struct fsl_mc_io *)priv->hw;
1703
1704         if (dpni == NULL) {
1705                 DPAA2_PMD_ERR("dpni is NULL");
1706                 return ret;
1707         }
1708
1709         /* It is necessary to obtain the current state before setting fc_conf
1710          * as the MC would return an error if the rate, autoneg or duplex
1711          * values differ.
1712          */
1713         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1714         if (ret) {
1715                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
1716                 return -1;
1717         }
1718
1719         /* Disable link before setting configuration */
1720         dpaa2_dev_set_link_down(dev);
1721
1722         /* Based on fc_conf, update cfg */
1723         cfg.rate = state.rate;
1724         cfg.options = state.options;
1725
1726         /* update cfg with fc_conf */
1727         switch (fc_conf->mode) {
1728         case RTE_FC_FULL:
1729                 /* Full flow control;
1730                  * OPT_PAUSE set, ASYM_PAUSE not set
1731                  */
1732                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1733                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1734                 break;
1735         case RTE_FC_TX_PAUSE:
1736                 /* Send PAUSE frames (flow control on the RX side);
1737                  * OPT_PAUSE not set;
1738                  * ASYM_PAUSE set;
1739                  */
1740                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1741                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1742                 break;
1743         case RTE_FC_RX_PAUSE:
1744                 /* Act on received PAUSE frames (flow control on the TX side);
1745                  * OPT_PAUSE set
1746                  * ASYM_PAUSE set
1747                  */
1748                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1749                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1750                 break;
1751         case RTE_FC_NONE:
1752                 /* Disable Flow control
1753                  * OPT_PAUSE not set
1754                  * ASYM_PAUSE not set
1755                  */
1756                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1757                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1758                 break;
1759         default:
1760                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
1761                               fc_conf->mode);
1762                 return -1;
1763         }
1764
1765         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1766         if (ret)
1767                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
1768                               ret);
1769
1770         /* Enable link */
1771         dpaa2_dev_set_link_up(dev);
1772
1773         return ret;
1774 }
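
/* A minimal application-side sketch for the two hooks above, using the
 * generic ethdev API (port_id is illustrative):
 *
 *   struct rte_eth_fc_conf fc_conf;
 *
 *   memset(&fc_conf, 0, sizeof(fc_conf));
 *   fc_conf.mode = RTE_FC_FULL;
 *   if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *           rte_exit(EXIT_FAILURE, "cannot set flow control\n");
 *
 * Note that dpaa2_flow_ctrl_set() toggles the link down and back up
 * around dpni_set_link_cfg(), so a brief traffic disruption is expected.
 */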
1775
1776 static int
1777 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1778                           struct rte_eth_rss_conf *rss_conf)
1779 {
1780         struct rte_eth_dev_data *data = dev->data;
1781         struct rte_eth_conf *eth_conf = &data->dev_conf;
1782         int ret;
1783
1784         PMD_INIT_FUNC_TRACE();
1785
1786         if (rss_conf->rss_hf) {
1787                 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1788                 if (ret) {
1789                         DPAA2_PMD_ERR("Unable to set flow dist");
1790                         return ret;
1791                 }
1792         } else {
1793                 ret = dpaa2_remove_flow_dist(dev, 0);
1794                 if (ret) {
1795                         DPAA2_PMD_ERR("Unable to remove flow dist");
1796                         return ret;
1797                 }
1798         }
1799         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1800         return 0;
1801 }
1802
1803 static int
1804 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1805                             struct rte_eth_rss_conf *rss_conf)
1806 {
1807         struct rte_eth_dev_data *data = dev->data;
1808         struct rte_eth_conf *eth_conf = &data->dev_conf;
1809
1810         /* dpaa2 does not support rss_key, so length should be 0 */
1811         rss_conf->rss_key_len = 0;
1812         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1813         return 0;
1814 }
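
/* RSS on dpaa2 is driven purely by rss_hf; there is no programmable hash
 * key, which is why rss_hash_conf_get() reports rss_key_len = 0. A
 * minimal application-side sketch (port_id is illustrative):
 *
 *   struct rte_eth_rss_conf rss_conf = {
 *           .rss_key = NULL,
 *           .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *   };
 *
 *   rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *
 * Setting rss_hf back to 0 removes the flow distribution again.
 */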
1815
1816 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
1817                 int eth_rx_queue_id,
1818                 uint16_t dpcon_id,
1819                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1820 {
1821         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1822         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1823         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1824         uint8_t flow_id = dpaa2_ethq->flow_id;
1825         struct dpni_queue cfg;
1826         uint8_t options;
1827         int ret;
1828
1829         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
1830                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
1831         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
1832                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
1833         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
1834                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
1835         else
1836                 return -EINVAL;
1837
1838         memset(&cfg, 0, sizeof(struct dpni_queue));
1839         options = DPNI_QUEUE_OPT_DEST;
1840         cfg.destination.type = DPNI_DEST_DPCON;
1841         cfg.destination.id = dpcon_id;
1842         cfg.destination.priority = queue_conf->ev.priority;
1843
1844         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
1845                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
1846                 cfg.destination.hold_active = 1;
1847         }
1848
1849         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
1850                         !eth_priv->en_ordered) {
1851                 struct opr_cfg ocfg;
1852
1853                 /* Restoration window size = 256 frames */
1854                 ocfg.oprrws = 3;
1855                 /* Restoration window size = 512 frames for LX2 */
1856                 if (dpaa2_svr_family == SVR_LX2160A)
1857                         ocfg.oprrws = 4;
1858                 /* Auto advance NESN window enabled */
1859                 ocfg.oa = 1;
1860                 /* Late arrival window size disabled */
1861                 ocfg.olws = 0;
1862                 /* ORL resource exhaustion advance NESN disabled */
1863                 ocfg.oeane = 0;
1864                 /* Loose ordering enabled */
1865                 ocfg.oloe = 1;
1866                 eth_priv->en_loose_ordered = 1;
1867                 /* Strict ordering enabled if explicitly set */
1868                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
1869                         ocfg.oloe = 0;
1870                         eth_priv->en_loose_ordered = 0;
1871                 }
1872
1873                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
1874                                    dpaa2_ethq->tc_index, flow_id,
1875                                    OPR_OPT_CREATE, &ocfg);
1876                 if (ret) {
1877                         DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
1878                         return ret;
1879                 }
1880
1881                 eth_priv->en_ordered = 1;
1882         }
1883
1884         options |= DPNI_QUEUE_OPT_USER_CTX;
1885         cfg.user_context = (size_t)(dpaa2_ethq);
1886
1887         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1888                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1889         if (ret) {
1890                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1891                 return ret;
1892         }
1893
1894         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
1895
1896         return 0;
1897 }
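
/* Applications do not call dpaa2_eth_eventq_attach() directly; it is
 * typically reached through the dpaa2 event device when a queue is added
 * with rte_event_eth_rx_adapter_queue_add(). The ORP restoration window
 * values used above (oprrws = 3 -> 256 frames, oprrws = 4 -> 512 frames)
 * are consistent with a 32 << oprrws frames encoding.
 */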
1898
1899 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
1900                 int eth_rx_queue_id)
1901 {
1902         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1903         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1904         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1905         uint8_t flow_id = dpaa2_ethq->flow_id;
1906         struct dpni_queue cfg;
1907         uint8_t options;
1908         int ret;
1909
1910         memset(&cfg, 0, sizeof(struct dpni_queue));
1911         options = DPNI_QUEUE_OPT_DEST;
1912         cfg.destination.type = DPNI_DEST_NONE;
1913
1914         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1915                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1916         if (ret)
1917                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1918
1919         return ret;
1920 }
1921
1922 static inline int
1923 dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
1924 {
1925         unsigned int i;
1926
1927         for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
1928                 if (dpaa2_supported_filter_ops[i] == filter_op)
1929                         return 0;
1930         }
1931         return -ENOTSUP;
1932 }
1933
1934 static int
1935 dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
1936                     enum rte_filter_type filter_type,
1937                     enum rte_filter_op filter_op,
1938                     void *arg)
1939 {
1940         int ret = 0;
1941
1942         if (!dev)
1943                 return -ENODEV;
1944
1945         switch (filter_type) {
1946         case RTE_ETH_FILTER_GENERIC:
1947                 if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
1948                         ret = -ENOTSUP;
1949                         break;
1950                 }
1951                 *(const void **)arg = &dpaa2_flow_ops;
1952                 dpaa2_filter_type |= filter_type;
1953                 break;
1954         default:
1955                 DPAA2_PMD_ERR("Filter type (%d) not supported",
1956                               filter_type);
1957                 ret = -ENOTSUP;
1958                 break;
1959         }
1960         return ret;
1961 }
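
/* RTE_ETH_FILTER_GENERIC is how the rte_flow layer discovers a PMD's
 * flow ops: rte_flow_ops_get() invokes filter_ctrl with that filter
 * type and expects a struct rte_flow_ops pointer back in arg, which is
 * what the handler above returns via dpaa2_flow_ops.
 */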
1962
1963 static struct eth_dev_ops dpaa2_ethdev_ops = {
1964         .dev_configure            = dpaa2_eth_dev_configure,
1965         .dev_start                = dpaa2_dev_start,
1966         .dev_stop                 = dpaa2_dev_stop,
1967         .dev_close                = dpaa2_dev_close,
1968         .promiscuous_enable       = dpaa2_dev_promiscuous_enable,
1969         .promiscuous_disable      = dpaa2_dev_promiscuous_disable,
1970         .allmulticast_enable      = dpaa2_dev_allmulticast_enable,
1971         .allmulticast_disable     = dpaa2_dev_allmulticast_disable,
1972         .dev_set_link_up          = dpaa2_dev_set_link_up,
1973         .dev_set_link_down        = dpaa2_dev_set_link_down,
1974         .link_update              = dpaa2_dev_link_update,
1975         .stats_get                = dpaa2_dev_stats_get,
1976         .xstats_get               = dpaa2_dev_xstats_get,
1977         .xstats_get_by_id         = dpaa2_xstats_get_by_id,
1978         .xstats_get_names_by_id   = dpaa2_xstats_get_names_by_id,
1979         .xstats_get_names         = dpaa2_xstats_get_names,
1980         .stats_reset              = dpaa2_dev_stats_reset,
1981         .xstats_reset             = dpaa2_dev_stats_reset,
1982         .fw_version_get           = dpaa2_fw_version_get,
1983         .dev_infos_get            = dpaa2_dev_info_get,
1984         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
1985         .mtu_set                  = dpaa2_dev_mtu_set,
1986         .vlan_filter_set          = dpaa2_vlan_filter_set,
1987         .vlan_offload_set         = dpaa2_vlan_offload_set,
1988         .vlan_tpid_set            = dpaa2_vlan_tpid_set,
1989         .rx_queue_setup           = dpaa2_dev_rx_queue_setup,
1990         .rx_queue_release         = dpaa2_dev_rx_queue_release,
1991         .tx_queue_setup           = dpaa2_dev_tx_queue_setup,
1992         .tx_queue_release         = dpaa2_dev_tx_queue_release,
1993         .rx_queue_count           = dpaa2_dev_rx_queue_count,
1994         .flow_ctrl_get            = dpaa2_flow_ctrl_get,
1995         .flow_ctrl_set            = dpaa2_flow_ctrl_set,
1996         .mac_addr_add             = dpaa2_dev_add_mac_addr,
1997         .mac_addr_remove          = dpaa2_dev_remove_mac_addr,
1998         .mac_addr_set             = dpaa2_dev_set_mac_addr,
1999         .rss_hash_update          = dpaa2_dev_rss_hash_update,
2000         .rss_hash_conf_get        = dpaa2_dev_rss_hash_conf_get,
2001         .filter_ctrl              = dpaa2_dev_flow_ctrl,
2002 };
2003
2004 /* Populate the MAC address from the physically available (u-boot/firmware)
2005  * address and/or the one set by higher layers like MC (restool) etc.
2006  * The resolved primary MAC is written to the given entry (0 on success).
2007  */
2008 static int
2009 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2010                   struct rte_ether_addr *mac_entry)
2011 {
2012         int ret;
2013         struct rte_ether_addr phy_mac, prime_mac;
2014
2015         memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2016         memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2017
2018         /* Get the physical device MAC address */
2019         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2020                                      phy_mac.addr_bytes);
2021         if (ret) {
2022                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2023                 goto cleanup;
2024         }
2025
2026         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2027                                         prime_mac.addr_bytes);
2028         if (ret) {
2029                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2030                 goto cleanup;
2031         }
2032
2033         /* Now that both MACs have been obtained, do:
2034          *  if phy MAC is non-empty and phy != prime, overwrite prime with phy
2035          *     and return phy;
2036          *  if phy MAC is empty, return prime;
2037          *  if both are empty, create a random MAC, set it as prime and return it.
2038          */
2039         if (!rte_is_zero_ether_addr(&phy_mac)) {
2040                 /* If the addresses are not the same, overwrite prime */
2041                 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2042                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2043                                                         priv->token,
2044                                                         phy_mac.addr_bytes);
2045                         if (ret) {
2046                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2047                                               ret);
2048                                 goto cleanup;
2049                         }
2050                         memcpy(&prime_mac, &phy_mac,
2051                                 sizeof(struct rte_ether_addr));
2052                 }
2053         } else if (rte_is_zero_ether_addr(&prime_mac)) {
2054                 /* If both phy and prime MACs are zero, create a random MAC */
2055                 rte_eth_random_addr(prime_mac.addr_bytes);
2056                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2057                                                 priv->token,
2058                                                 prime_mac.addr_bytes);
2059                 if (ret) {
2060                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2061                         goto cleanup;
2062                 }
2063         }
2064
2065         /* prime_mac holds the final MAC address */
2066         memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2067         return 0;
2068
2069 cleanup:
2070         return -1;
2071 }
2072
2073 static int
2074 check_devargs_handler(__rte_unused const char *key, const char *value,
2075                       __rte_unused void *opaque)
2076 {
2077         if (strcmp(value, "1"))
2078                 return -1;
2079
2080         return 0;
2081 }
2082
2083 static int
2084 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2085 {
2086         struct rte_kvargs *kvlist;
2087
2088         if (!devargs)
2089                 return 0;
2090
2091         kvlist = rte_kvargs_parse(devargs->args, NULL);
2092         if (!kvlist)
2093                 return 0;
2094
2095         if (!rte_kvargs_count(kvlist, key)) {
2096                 rte_kvargs_free(kvlist);
2097                 return 0;
2098         }
2099
2100         if (rte_kvargs_process(kvlist, key,
2101                                check_devargs_handler, NULL) < 0) {
2102                 rte_kvargs_free(kvlist);
2103                 return 0;
2104         }
2105         rte_kvargs_free(kvlist);
2106
2107         return 1;
2108 }
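
/* dpaa2_get_devargs() returns 1 only when the given key is present in
 * the device arguments and set to exactly "1". An illustrative EAL
 * command line (assuming a DPNI named dpni.1 on the fslmc bus):
 *
 *   ... -w fslmc:dpni.1,drv_loopback=1 ...
 *
 * which makes dpaa2_dev_init() below select dpaa2_dev_loopback_rx.
 */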
2109
2110 static int
2111 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2112 {
2113         struct rte_device *dev = eth_dev->device;
2114         struct rte_dpaa2_device *dpaa2_dev;
2115         struct fsl_mc_io *dpni_dev;
2116         struct dpni_attr attr;
2117         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2118         struct dpni_buffer_layout layout;
2119         int ret, hw_id, i;
2120
2121         PMD_INIT_FUNC_TRACE();
2122
2123         /* For secondary processes, the primary has done all the work */
2124         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2125                 /* For a secondary process, only the burst and ops APIs
2126                  * need to be plugged in.
2127                  */
2128                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2129                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2130                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2131                 else
2132                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2133                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2134                 return 0;
2135         }
2136
2137         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2138
2139         hw_id = dpaa2_dev->object_id;
2140
2141         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2142         if (!dpni_dev) {
2143                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2144                 return -1;
2145         }
2146
2147         dpni_dev->regs = rte_mcp_ptr_list[0];
2148         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2149         if (ret) {
2150                 DPAA2_PMD_ERR(
2151                              "Failure in opening dpni@%d with err code %d",
2152                              hw_id, ret);
2153                 rte_free(dpni_dev);
2154                 return -1;
2155         }
2156
2157         /* Clean the device first */
2158         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2159         if (ret) {
2160                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2161                               hw_id, ret);
2162                 goto init_err;
2163         }
2164
2165         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2166         if (ret) {
2167                 DPAA2_PMD_ERR(
2168                              "Failure in get dpni@%d attribute, err code %d",
2169                              hw_id, ret);
2170                 goto init_err;
2171         }
2172
2173         priv->num_rx_tc = attr.num_rx_tcs;
2174
2175         for (i = 0; i < attr.num_rx_tcs; i++)
2176                 priv->nb_rx_queues += attr.num_queues;
2177
2178         /* Using the number of TX TCs as the number of TX queues */
2179         priv->nb_tx_queues = attr.num_tx_tcs;
2180
2181         DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
2182                         priv->num_rx_tc, priv->nb_rx_queues,
2183                         priv->nb_tx_queues);
2184
2185         priv->hw = dpni_dev;
2186         priv->hw_id = hw_id;
2187         priv->options = attr.options;
2188         priv->max_mac_filters = attr.mac_filter_entries;
2189         priv->max_vlan_filters = attr.vlan_filter_entries;
2190         priv->flags = 0;
2191
2192         /* Allocate memory for hardware structure for queues */
2193         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2194         if (ret) {
2195                 DPAA2_PMD_ERR("Queue allocation failed");
2196                 goto init_err;
2197         }
2198
2199         /* Allocate memory for storing MAC addresses.
2200          * A table of mac_filter_entries size is allocated so that the RTE
2201          * ether lib can add MAC entries when rte_eth_dev_mac_addr_add is called.
2202          */
2203         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2204                 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2205         if (eth_dev->data->mac_addrs == NULL) {
2206                 DPAA2_PMD_ERR(
2207                    "Failed to allocate %d bytes needed to store MAC addresses",
2208                    RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2209                 ret = -ENOMEM;
2210                 goto init_err;
2211         }
2212
2213         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2214         if (ret) {
2215                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2216                 rte_free(eth_dev->data->mac_addrs);
2217                 eth_dev->data->mac_addrs = NULL;
2218                 goto init_err;
2219         }
2220
2221         /* ... tx buffer layout ... */
2222         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2223         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2224         layout.pass_frame_status = 1;
2225         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2226                                      DPNI_QUEUE_TX, &layout);
2227         if (ret) {
2228                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2229                 goto init_err;
2230         }
2231
2232         /* ... tx-conf and error buffer layout ... */
2233         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2234         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2235         layout.pass_frame_status = 1;
2236         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2237                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2238         if (ret) {
2239                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2240                              ret);
2241                 goto init_err;
2242         }
2243
2244         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2245
2246         if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2247                 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2248                 DPAA2_PMD_INFO("Loopback mode");
2249         } else {
2250                 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2251         }
2252         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2253
2254         /* Init fields w.r.t. classification */
2255         memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
2256         priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2257         if (!priv->extract.qos_extract_param) {
2258                 DPAA2_PMD_ERR("Memory allocation failed for flow classification");
2259                 ret = -ENOMEM;
2260                 goto init_err;
2261         }
2262         for (i = 0; i < MAX_TCS; i++) {
2263                 memset(&priv->extract.fs_key_cfg[i], 0,
2264                         sizeof(struct dpkg_profile_cfg));
2265                 priv->extract.fs_extract_param[i] =
2266                         (size_t)rte_malloc(NULL, 256, 64);
2267                 if (!priv->extract.fs_extract_param[i]) {
2268                         DPAA2_PMD_ERR("Memory allocation failed for flow classification");
2269                         ret = -ENOMEM;
2270                         goto init_err;
2271                 }
2272         }
2273
2274         DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name);
2275         return 0;
2276 init_err:
2277         dpaa2_dev_uninit(eth_dev);
2278         return ret;
2279 }
2280
2281 static int
2282 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2283 {
2284         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2285         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2286         int i, ret;
2287
2288         PMD_INIT_FUNC_TRACE();
2289
2290         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2291                 return 0;
2292
2293         if (!dpni) {
2294                 DPAA2_PMD_WARN("Already closed or not started");
2295                 return -1;
2296         }
2297
2298         dpaa2_dev_close(eth_dev);
2299
2300         dpaa2_free_rx_tx_queues(eth_dev);
2301
2302         /* Close the device at underlying layer */
2303         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2304         if (ret) {
2305                 DPAA2_PMD_ERR(
2306                              "Failure closing dpni device with err code %d",
2307                              ret);
2308         }
2309
2310         /* Free the allocated memory for ethernet private data and dpni */
2311         priv->hw = NULL;
2312         rte_free(dpni);
2313
2314         for (i = 0; i < MAX_TCS; i++) {
2315                 if (priv->extract.fs_extract_param[i])
2316                         rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
2317         }
2318
2319         if (priv->extract.qos_extract_param)
2320                 rte_free((void *)(size_t)priv->extract.qos_extract_param);
2321
2322         eth_dev->dev_ops = NULL;
2323         eth_dev->rx_pkt_burst = NULL;
2324         eth_dev->tx_pkt_burst = NULL;
2325
2326         DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2327         return 0;
2328 }
2329
2330 static int
2331 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2332                 struct rte_dpaa2_device *dpaa2_dev)
2333 {
2334         struct rte_eth_dev *eth_dev;
2335         int diag;
2336
2337         if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2338                 RTE_PKTMBUF_HEADROOM) {
2339                 DPAA2_PMD_ERR(
2340                 "RTE_PKTMBUF_HEADROOM(%d) shall be >= DPAA2 Annotation req(%d)",
2341                 RTE_PKTMBUF_HEADROOM,
2342                 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2343
2344                 return -1;
2345         }
2346
2347         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2348                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2349                 if (!eth_dev)
2350                         return -ENODEV;
2351                 eth_dev->data->dev_private = rte_zmalloc(
2352                                                 "ethdev private structure",
2353                                                 sizeof(struct dpaa2_dev_priv),
2354                                                 RTE_CACHE_LINE_SIZE);
2355                 if (eth_dev->data->dev_private == NULL) {
2356                         DPAA2_PMD_CRIT(
2357                                 "Unable to allocate memory for private data");
2358                         rte_eth_dev_release_port(eth_dev);
2359                         return -ENOMEM;
2360                 }
2361         } else {
2362                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2363                 if (!eth_dev)
2364                         return -ENODEV;
2365         }
2366
2367         eth_dev->device = &dpaa2_dev->device;
2368
2369         dpaa2_dev->eth_dev = eth_dev;
2370         eth_dev->data->rx_mbuf_alloc_failed = 0;
2371
2372         if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2373                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2374
2375         /* Invoke PMD device initialization function */
2376         diag = dpaa2_dev_init(eth_dev);
2377         if (diag == 0) {
2378                 rte_eth_dev_probing_finish(eth_dev);
2379                 return 0;
2380         }
2381
2382         rte_eth_dev_release_port(eth_dev);
2383         return diag;
2384 }
2385
2386 static int
2387 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2388 {
2389         struct rte_eth_dev *eth_dev;
2390
2391         eth_dev = dpaa2_dev->eth_dev;
2392         dpaa2_dev_uninit(eth_dev);
2393
2394         rte_eth_dev_release_port(eth_dev);
2395
2396         return 0;
2397 }
2398
2399 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2400         .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2401         .drv_type = DPAA2_ETH,
2402         .probe = rte_dpaa2_probe,
2403         .remove = rte_dpaa2_remove,
2404 };
2405
2406 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2407 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2408                 DRIVER_LOOPBACK_MODE "=<int>");
2409 RTE_INIT(dpaa2_pmd_init_log)
2410 {
2411         dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
2412         if (dpaa2_logtype_pmd >= 0)
2413                 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
2414 }