dpdk.git: drivers/net/dpaa2/dpaa2_ethdev.c (commit 7b09a8749827adc2fb874bd24d382a7c6497bab7)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright 2016 NXP.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_fslmc.h>
46
47 #include <fslmc_logs.h>
48 #include <fslmc_vfio.h>
49 #include <dpaa2_hw_pvt.h>
50 #include <dpaa2_hw_mempool.h>
51 #include <dpaa2_hw_dpio.h>
52 #include <mc/fsl_dpmng.h>
53 #include "dpaa2_ethdev.h"
54
55 struct rte_dpaa2_xstats_name_off {
56         char name[RTE_ETH_XSTATS_NAME_SIZE];
57         uint8_t page_id; /* dpni statistics page id */
58         uint8_t stats_id; /* stats id in the given page */
59 };
60
61 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
62         {"ingress_multicast_frames", 0, 2},
63         {"ingress_multicast_bytes", 0, 3},
64         {"ingress_broadcast_frames", 0, 4},
65         {"ingress_broadcast_bytes", 0, 5},
66         {"egress_multicast_frames", 1, 2},
67         {"egress_multicast_bytes", 1, 3},
68         {"egress_broadcast_frames", 1, 4},
69         {"egress_broadcast_bytes", 1, 5},
70         {"ingress_filtered_frames", 2, 0},
71         {"ingress_discarded_frames", 2, 1},
72         {"ingress_nobuffer_discards", 2, 2},
73         {"egress_discarded_frames", 2, 3},
74         {"egress_confirmed_frames", 2, 4},
75 };
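/* page_id selects the dpni_statistics page and stats_id the counter word
 * within it; dpaa2_dev_xstats_get() reads each value from
 * value[page_id].raw.counter[stats_id].
 */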
76
77 static struct rte_dpaa2_driver rte_dpaa2_pmd;
78 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
79 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
80                                  int wait_to_complete);
81 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
82 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
83 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
84
85 /**
86  * Atomically reads the link status information from the global
87  * structure rte_eth_dev.
88  *
89  * @param dev
90  *   Pointer to the structure rte_eth_dev to read from.
91  * @param link
92  *   Pointer to the buffer to be filled with the link status.
93  * @return
94  *   - On success, zero.
95  *   - On failure, negative value.
96  */
97 static inline int
98 dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
99                                   struct rte_eth_link *link)
100 {
101         struct rte_eth_link *dst = link;
102         struct rte_eth_link *src = &dev->data->dev_link;
103
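        /* A 64-bit compare-and-set copies the link word atomically; it fails
         * (and we return -1) if another thread updated it concurrently.
         */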
104         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
105                                 *(uint64_t *)src) == 0)
106                 return -1;
107
108         return 0;
109 }
110
111 /**
112  * Atomically writes the link status information into the global
113  * structure rte_eth_dev.
114  *
115  * @param dev
116  *   Pointer to the structure rte_eth_dev to write to.
117  * @param link
118  *   Pointer to the buffer holding the link status to be written.
119  * @return
120  *   - On success, zero.
121  *   - On failure, negative value.
122  */
123 static inline int
124 dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
125                                    struct rte_eth_link *link)
126 {
127         struct rte_eth_link *dst = &dev->data->dev_link;
128         struct rte_eth_link *src = link;
129
130         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
131                                 *(uint64_t *)src) == 0)
132                 return -1;
133
134         return 0;
135 }
136
137 static int
138 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
139 {
140         int ret;
141         struct dpaa2_dev_priv *priv = dev->data->dev_private;
142         struct fsl_mc_io *dpni = priv->hw;
143
144         PMD_INIT_FUNC_TRACE();
145
146         if (dpni == NULL) {
147                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
148                 return -1;
149         }
150
151         if (on)
152                 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
153                                        priv->token, vlan_id);
154         else
155                 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
156                                           priv->token, vlan_id);
157
158         if (ret < 0)
159                 PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
160                             ret, vlan_id, priv->hw_id);
161
162         return ret;
163 }
164
165 static void
166 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
167 {
168         struct dpaa2_dev_priv *priv = dev->data->dev_private;
169         struct fsl_mc_io *dpni = priv->hw;
170         int ret;
171
172         PMD_INIT_FUNC_TRACE();
173
174         if (mask & ETH_VLAN_FILTER_MASK) {
175                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
176                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
177                                                       priv->token, true);
178                 else
179                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
180                                                       priv->token, false);
181                 if (ret < 0)
182                         RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
183                                 ret);
184         }
185 }
186
187 static int
188 dpaa2_fw_version_get(struct rte_eth_dev *dev,
189                      char *fw_version,
190                      size_t fw_size)
191 {
192         int ret;
193         struct dpaa2_dev_priv *priv = dev->data->dev_private;
194         struct fsl_mc_io *dpni = priv->hw;
195         struct mc_soc_version mc_plat_info = {0};
196         struct mc_version mc_ver_info = {0};
197
198         PMD_INIT_FUNC_TRACE();
199
200         if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
201                 RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
202
203         if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
204                 RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
205
206         ret = snprintf(fw_version, fw_size,
207                        "%x-%d.%d.%d",
208                        mc_plat_info.svr,
209                        mc_ver_info.major,
210                        mc_ver_info.minor,
211                        mc_ver_info.revision);
212
213         ret += 1; /* add the size of '\0' */
214         if (fw_size < (uint32_t)ret)
215                 return ret;
216         else
217                 return 0;
218 }
219
220 static void
221 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
222 {
223         struct dpaa2_dev_priv *priv = dev->data->dev_private;
224
225         PMD_INIT_FUNC_TRACE();
226
227         dev_info->if_index = priv->hw_id;
228
229         dev_info->max_mac_addrs = priv->max_mac_filters;
230         dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
231         dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
232         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
233         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
234         dev_info->rx_offload_capa =
235                 DEV_RX_OFFLOAD_IPV4_CKSUM |
236                 DEV_RX_OFFLOAD_UDP_CKSUM |
237                 DEV_RX_OFFLOAD_TCP_CKSUM |
238                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
239         dev_info->tx_offload_capa =
240                 DEV_TX_OFFLOAD_IPV4_CKSUM |
241                 DEV_TX_OFFLOAD_UDP_CKSUM |
242                 DEV_TX_OFFLOAD_TCP_CKSUM |
243                 DEV_TX_OFFLOAD_SCTP_CKSUM |
244                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
245         dev_info->speed_capa = ETH_LINK_SPEED_1G |
246                         ETH_LINK_SPEED_2_5G |
247                         ETH_LINK_SPEED_10G;
248 }
249
250 static int
251 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
252 {
253         struct dpaa2_dev_priv *priv = dev->data->dev_private;
254         uint16_t dist_idx;
255         uint32_t vq_id;
256         struct dpaa2_queue *mc_q, *mcq;
257         uint32_t tot_queues;
258         int i;
259         struct dpaa2_queue *dpaa2_q;
260
261         PMD_INIT_FUNC_TRACE();
262
263         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
264         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
265                           RTE_CACHE_LINE_SIZE);
266         if (!mc_q) {
267                 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
268                 return -1;
269         }
270
271         for (i = 0; i < priv->nb_rx_queues; i++) {
272                 mc_q->dev = dev;
273                 priv->rx_vq[i] = mc_q++;
274                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
275                 dpaa2_q->q_storage = rte_malloc("dq_storage",
276                                         sizeof(struct queue_storage_info_t),
277                                         RTE_CACHE_LINE_SIZE);
278                 if (!dpaa2_q->q_storage)
279                         goto fail;
280
281                 memset(dpaa2_q->q_storage, 0,
282                        sizeof(struct queue_storage_info_t));
283                 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
284                         goto fail;
285         }
286
287         for (i = 0; i < priv->nb_tx_queues; i++) {
288                 mc_q->dev = dev;
289                 mc_q->flow_id = 0xffff;
290                 priv->tx_vq[i] = mc_q++;
291                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
292                 dpaa2_q->cscn = rte_malloc(NULL,
293                                            sizeof(struct qbman_result), 16);
294                 if (!dpaa2_q->cscn)
295                         goto fail_tx;
296         }
297
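        /* Map each RX queue to its own flow id within the default
         * traffic class.
         */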
298         vq_id = 0;
299         for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
300                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
301                 mcq->tc_index = DPAA2_DEF_TC;
302                 mcq->flow_id = dist_idx;
303                 vq_id++;
304         }
305
306         return 0;
307 fail_tx:
308         i -= 1;
309         while (i >= 0) {
310                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
311                 rte_free(dpaa2_q->cscn);
312                 priv->tx_vq[i--] = NULL;
313         }
314         i = priv->nb_rx_queues;
315 fail:
316         i -= 1;
317         mc_q = priv->rx_vq[0];
318         while (i >= 0) {
319                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
320                 dpaa2_free_dq_storage(dpaa2_q->q_storage);
321                 rte_free(dpaa2_q->q_storage);
322                 priv->rx_vq[i--] = NULL;
323         }
324         rte_free(mc_q);
325         return -1;
326 }
327
328 static int
329 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
330 {
331         struct rte_eth_dev_data *data = dev->data;
332         struct rte_eth_conf *eth_conf = &data->dev_conf;
333         int ret;
334
335         PMD_INIT_FUNC_TRACE();
336
337         if (eth_conf->rxmode.jumbo_frame == 1) {
338                 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
339                         ret = dpaa2_dev_mtu_set(dev,
340                                         eth_conf->rxmode.max_rx_pkt_len);
341                         if (ret) {
342                                 PMD_INIT_LOG(ERR,
343                                              "unable to set mtu. check config\n");
344                                 return ret;
345                         }
346                 } else {
347                         return -1;
348                 }
349         }
350
351         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
352                 ret = dpaa2_setup_flow_dist(dev,
353                                 eth_conf->rx_adv_conf.rss_conf.rss_hf);
354                 if (ret) {
355                         PMD_INIT_LOG(ERR, "unable to set flow distribution;"
356                                      " please check queue config\n");
357                         return ret;
358                 }
359         }
360
361         /* update the current status */
362         dpaa2_dev_link_update(dev, 0);
363
364         return 0;
365 }
366
367 /* Function to setup RX flow information. It contains traffic class ID,
368  * flow ID, destination configuration etc.
369  */
370 static int
371 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
372                          uint16_t rx_queue_id,
373                          uint16_t nb_rx_desc __rte_unused,
374                          unsigned int socket_id __rte_unused,
375                          const struct rte_eth_rxconf *rx_conf __rte_unused,
376                          struct rte_mempool *mb_pool)
377 {
378         struct dpaa2_dev_priv *priv = dev->data->dev_private;
379         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
380         struct mc_soc_version mc_plat_info = {0};
381         struct dpaa2_queue *dpaa2_q;
382         struct dpni_queue cfg;
383         uint8_t options = 0;
384         uint8_t flow_id;
385         uint32_t bpid;
386         int ret;
387
388         PMD_INIT_FUNC_TRACE();
389
390         PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
391                      dev, rx_queue_id, mb_pool, rx_conf);
392
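        /* First use of this mempool on the port: attach the DPAA2 buffer
         * pool backing it so the hardware can acquire buffers from it.
         */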
393         if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
394                 bpid = mempool_to_bpid(mb_pool);
395                 ret = dpaa2_attach_bp_list(priv,
396                                            rte_dpaa2_bpid_info[bpid].bp_list);
397                 if (ret)
398                         return ret;
399         }
400         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
401         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
402
403         /*Get the flow id from given VQ id*/
404         flow_id = rx_queue_id % priv->nb_rx_queues;
405         memset(&cfg, 0, sizeof(struct dpni_queue));
406
407         options = options | DPNI_QUEUE_OPT_USER_CTX;
408         cfg.user_context = (uint64_t)(dpaa2_q);
409
410         /* if ls2088 or rev2 device, enable the stashing */
411
412         if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
413                 PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
414
415         if ((mc_plat_info.svr & 0xffff0000) != SVR_LS2080A) {
416                 options |= DPNI_QUEUE_OPT_FLC;
417                 cfg.flc.stash_control = true;
418                 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
419                 /* The last 6 bits of the FLC value select annotation, context
420                  * and data stashing; 01 01 00 (0x14) enables stashing of one
421                  * cache line of data and one line of annotation.
422                  */
423                 cfg.flc.value |= 0x14;
424         }
425         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
426                              dpaa2_q->tc_index, flow_id, options, &cfg);
427         if (ret) {
428                 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
429                 return -1;
430         }
431
432         if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
433                 struct dpni_taildrop taildrop;
434
435                 taildrop.enable = 1;
436                 /*enabling per rx queue congestion control */
437                 taildrop.threshold = CONG_THRESHOLD_RX_Q;
438                 taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
439                 taildrop.oal = CONG_RX_OAL;
440                 PMD_INIT_LOG(DEBUG, "Enabling taildrop on queue = %d",
441                              rx_queue_id);
442                 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
443                                         DPNI_CP_QUEUE, DPNI_QUEUE_RX,
444                                         dpaa2_q->tc_index, flow_id, &taildrop);
445                 if (ret) {
446                         PMD_INIT_LOG(ERR, "Error in setting the rx flow"
447                                      " err : = %d\n", ret);
448                         return -1;
449                 }
450         }
451
452         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
453         return 0;
454 }
455
456 static int
457 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
458                          uint16_t tx_queue_id,
459                          uint16_t nb_tx_desc __rte_unused,
460                          unsigned int socket_id __rte_unused,
461                          const struct rte_eth_txconf *tx_conf __rte_unused)
462 {
463         struct dpaa2_dev_priv *priv = dev->data->dev_private;
464         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
465                 priv->tx_vq[tx_queue_id];
466         struct fsl_mc_io *dpni = priv->hw;
467         struct dpni_queue tx_conf_cfg;
468         struct dpni_queue tx_flow_cfg;
469         uint8_t options = 0, flow_id;
470         uint32_t tc_id;
471         int ret;
472
473         PMD_INIT_FUNC_TRACE();
474
475         /* Return if queue already configured */
476         if (dpaa2_q->flow_id != 0xffff)
477                 return 0;
478
479         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
480         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
481
482         tc_id = tx_queue_id;
483         flow_id = 0;
484
485         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
486                              tc_id, flow_id, options, &tx_flow_cfg);
487         if (ret) {
488                 PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
489                              "tc_id=%d, flow =%d ErrorCode = %x\n",
490                              tc_id, flow_id, -ret);
491                 return -1;
492         }
493
494         dpaa2_q->flow_id = flow_id;
495
496         if (tx_queue_id == 0) {
497                 /*Set tx-conf and error configuration*/
498                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
499                                                     priv->token,
500                                                     DPNI_CONF_DISABLE);
501                 if (ret) {
502                         PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
503                                      " ErrorCode = %x", ret);
504                         return -1;
505                 }
506         }
507         dpaa2_q->tc_index = tc_id;
508
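        /* Unless TX congestion-group support is disabled, request CSCN
         * (congestion state change notification) writes into the per-queue
         * cscn area so the transmit path can check congestion state before
         * enqueuing.
         */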
509         if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
510                 struct dpni_congestion_notification_cfg cong_notif_cfg;
511
512                 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
513                 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
514                 /* Notify that the queue is not congested when the data in
515                  * the queue is below this threshold.
516                  */
517                 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
518                 cong_notif_cfg.message_ctx = 0;
519                 cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
520                 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
521                 cong_notif_cfg.notification_mode =
522                                          DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
523                                          DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
524                                          DPNI_CONG_OPT_COHERENT_WRITE;
525
526                 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
527                                                        priv->token,
528                                                        DPNI_QUEUE_TX,
529                                                        tc_id,
530                                                        &cong_notif_cfg);
531                 if (ret) {
532                         PMD_INIT_LOG(ERR,
533                            "Error in setting tx congestion notification: = %d",
534                            -ret);
535                         return -ret;
536                 }
537         }
538         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
539         return 0;
540 }
541
542 static void
543 dpaa2_dev_rx_queue_release(void *q __rte_unused)
544 {
545         PMD_INIT_FUNC_TRACE();
546 }
547
548 static void
549 dpaa2_dev_tx_queue_release(void *q __rte_unused)
550 {
551         PMD_INIT_FUNC_TRACE();
552 }
553
554 static const uint32_t *
555 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
556 {
557         static const uint32_t ptypes[] = {
558                 /* todo - add more types */
559                 RTE_PTYPE_L2_ETHER,
560                 RTE_PTYPE_L3_IPV4,
561                 RTE_PTYPE_L3_IPV4_EXT,
562                 RTE_PTYPE_L3_IPV6,
563                 RTE_PTYPE_L3_IPV6_EXT,
564                 RTE_PTYPE_L4_TCP,
565                 RTE_PTYPE_L4_UDP,
566                 RTE_PTYPE_L4_SCTP,
567                 RTE_PTYPE_L4_ICMP,
568                 RTE_PTYPE_UNKNOWN
569         };
570
571         if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
572                 return ptypes;
573         return NULL;
574 }
575
576 /**
577  * Dpaa2 link Interrupt handler
578  *
579  * @param param
580  *  The address of parameter (struct rte_eth_dev *) registered before.
581  *
582  * @return
583  *  void
584  */
585 static void
586 dpaa2_interrupt_handler(void *param)
587 {
588         struct rte_eth_dev *dev = param;
589         struct dpaa2_dev_priv *priv = dev->data->dev_private;
590         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
591         int ret;
592         int irq_index = DPNI_IRQ_INDEX;
593         unsigned int status = 0, clear = 0;
594
595         PMD_INIT_FUNC_TRACE();
596
597         if (dpni == NULL) {
598                 RTE_LOG(ERR, PMD, "dpni is NULL");
599                 return;
600         }
601
602         ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
603                                   irq_index, &status);
604         if (unlikely(ret)) {
605                 RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
606                 clear = 0xffffffff;
607                 goto out;
608         }
609
610         if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
611                 clear = DPNI_IRQ_EVENT_LINK_CHANGED;
612                 dpaa2_dev_link_update(dev, 0);
613                 /* calling all the apps registered for link status event */
614                 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
615                                               NULL, NULL);
616         }
617 out:
618         ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
619                                     irq_index, clear);
620         if (unlikely(ret))
621                 RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
622 }
623
624 static int
625 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
626 {
627         int err = 0;
628         struct dpaa2_dev_priv *priv = dev->data->dev_private;
629         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
630         int irq_index = DPNI_IRQ_INDEX;
631         unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
632
633         PMD_INIT_FUNC_TRACE();
634
635         err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
636                                 irq_index, mask);
637         if (err < 0) {
638                 PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
639                              strerror(-err));
640                 return err;
641         }
642
643         err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
644                                   irq_index, enable);
645         if (err < 0)
646                 PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
647                              strerror(-err));
648
649         return err;
650 }
651
652 static int
653 dpaa2_dev_start(struct rte_eth_dev *dev)
654 {
655         struct rte_device *rdev = dev->device;
656         struct rte_dpaa2_device *dpaa2_dev;
657         struct rte_eth_dev_data *data = dev->data;
658         struct dpaa2_dev_priv *priv = data->dev_private;
659         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
660         struct dpni_queue cfg;
661         struct dpni_error_cfg   err_cfg;
662         uint16_t qdid;
663         struct dpni_queue_id qid;
664         struct dpaa2_queue *dpaa2_q;
665         int ret, i;
666         struct rte_intr_handle *intr_handle;
667
668         dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
669         intr_handle = &dpaa2_dev->intr_handle;
670
671         PMD_INIT_FUNC_TRACE();
672
673         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
674         if (ret) {
675                 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
676                              ret, priv->hw_id);
677                 return ret;
678         }
679
680         /* Power up the phy. Needed to make the link go UP */
681         dpaa2_dev_set_link_up(dev);
682
683         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
684                             DPNI_QUEUE_TX, &qdid);
685         if (ret) {
686                 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
687                 return ret;
688         }
689         priv->qdid = qdid;
690
691         for (i = 0; i < data->nb_rx_queues; i++) {
692                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
693                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
694                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
695                                        dpaa2_q->flow_id, &cfg, &qid);
696                 if (ret) {
697                         PMD_INIT_LOG(ERR, "Error to get flow "
698                                      "information Error code = %d\n", ret);
699                         return ret;
700                 }
701                 dpaa2_q->fqid = qid.fqid;
702         }
703
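        /* Enable L3/L4 checksum validation on RX and generation on TX in the
         * DPNI; per-frame results are reported through the frame annotation.
         */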
704         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
705                                DPNI_OFF_RX_L3_CSUM, true);
706         if (ret) {
707                 PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
708                 return ret;
709         }
710
711         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
712                                DPNI_OFF_RX_L4_CSUM, true);
713         if (ret) {
714                 PMD_INIT_LOG(ERR, "Error to set RX l4 csum:Error = %d\n", ret);
715                 return ret;
716         }
717
718         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
719                                DPNI_OFF_TX_L3_CSUM, true);
720         if (ret) {
721                 PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
722                 return ret;
723         }
724
725         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
726                                DPNI_OFF_TX_L4_CSUM, true);
727         if (ret) {
728                 PMD_INIT_LOG(ERR, "Error to set TX l4 csum:Error = %d\n", ret);
729                 return ret;
730         }
731
732         /* checksum errors: send frames on the normal path and mark the error in the annotation */
733         err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
734
735         err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
736         err_cfg.set_frame_annotation = true;
737
738         ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
739                                        priv->token, &err_cfg);
740         if (ret) {
741                 PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
742                              "code = %d\n", ret);
743                 return ret;
744         }
745         /* VLAN Offload Settings */
746         if (priv->max_vlan_filters)
747                 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
748
749         /* if the interrupts were configured on this device */
750         if (intr_handle && (intr_handle->fd) &&
751             (dev->data->dev_conf.intr_conf.lsc != 0)) {
752                 /* Registering LSC interrupt handler */
753                 rte_intr_callback_register(intr_handle,
754                                            dpaa2_interrupt_handler,
755                                            (void *)dev);
756
757                 /* enable vfio intr/eventfd mapping
758                  * Interrupt index 0 is required, so we can not use
759                  * rte_intr_enable.
760                  */
761                 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
762
763                 /* enable dpni_irqs */
764                 dpaa2_eth_setup_irqs(dev, 1);
765         }
766
767         return 0;
768 }
769
770 /**
771  *  This routine disables all traffic on the adapter by bringing the
772  *  link down and disabling the DPNI.
773  */
774 static void
775 dpaa2_dev_stop(struct rte_eth_dev *dev)
776 {
777         struct dpaa2_dev_priv *priv = dev->data->dev_private;
778         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
779         int ret;
780         struct rte_eth_link link;
781         struct rte_intr_handle *intr_handle = dev->intr_handle;
782
783         PMD_INIT_FUNC_TRACE();
784
785         /* reset interrupt callback  */
786         if (intr_handle && (intr_handle->fd) &&
787             (dev->data->dev_conf.intr_conf.lsc != 0)) {
788                 /*disable dpni irqs */
789                 dpaa2_eth_setup_irqs(dev, 0);
790
791                 /* disable vfio intr before callback unregister */
792                 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
793
794                 /* Unregistering LSC interrupt handler */
795                 rte_intr_callback_unregister(intr_handle,
796                                              dpaa2_interrupt_handler,
797                                              (void *)dev);
798         }
799
800         dpaa2_dev_set_link_down(dev);
801
802         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
803         if (ret) {
804                 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
805                              ret, priv->hw_id);
806                 return;
807         }
808
809         /* clear the recorded link status */
810         memset(&link, 0, sizeof(link));
811         dpaa2_dev_atomic_write_link_status(dev, &link);
812 }
813
814 static void
815 dpaa2_dev_close(struct rte_eth_dev *dev)
816 {
817         struct rte_eth_dev_data *data = dev->data;
818         struct dpaa2_dev_priv *priv = dev->data->dev_private;
819         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
820         int i, ret;
821         struct rte_eth_link link;
822         struct dpaa2_queue *dpaa2_q;
823
824         PMD_INIT_FUNC_TRACE();
825
826         for (i = 0; i < data->nb_tx_queues; i++) {
827                 dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
828                 if (dpaa2_q->cscn) {
829                         rte_free(dpaa2_q->cscn);
830                         dpaa2_q->cscn = NULL;
831                 }
832         }
833
834         /* Clean the device first */
835         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
836         if (ret) {
837                 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
838                              " error code %d\n", ret);
839                 return;
840         }
841
842         memset(&link, 0, sizeof(link));
843         dpaa2_dev_atomic_write_link_status(dev, &link);
844 }
845
846 static void
847 dpaa2_dev_promiscuous_enable(
848                 struct rte_eth_dev *dev)
849 {
850         int ret;
851         struct dpaa2_dev_priv *priv = dev->data->dev_private;
852         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
853
854         PMD_INIT_FUNC_TRACE();
855
856         if (dpni == NULL) {
857                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
858                 return;
859         }
860
861         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
862         if (ret < 0)
863                 RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);
864
865         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
866         if (ret < 0)
867                 RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
868 }
869
870 static void
871 dpaa2_dev_promiscuous_disable(
872                 struct rte_eth_dev *dev)
873 {
874         int ret;
875         struct dpaa2_dev_priv *priv = dev->data->dev_private;
876         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
877
878         PMD_INIT_FUNC_TRACE();
879
880         if (dpni == NULL) {
881                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
882                 return;
883         }
884
885         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
886         if (ret < 0)
887                 RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
888
889         if (dev->data->all_multicast == 0) {
890                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
891                                                  priv->token, false);
892                 if (ret < 0)
893                         RTE_LOG(ERR, PMD,
894                                 "Unable to disable M promisc mode %d\n",
895                                 ret);
896         }
897 }
898
899 static void
900 dpaa2_dev_allmulticast_enable(
901                 struct rte_eth_dev *dev)
902 {
903         int ret;
904         struct dpaa2_dev_priv *priv = dev->data->dev_private;
905         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
906
907         PMD_INIT_FUNC_TRACE();
908
909         if (dpni == NULL) {
910                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
911                 return;
912         }
913
914         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
915         if (ret < 0)
916                 RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
917 }
918
919 static void
920 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
921 {
922         int ret;
923         struct dpaa2_dev_priv *priv = dev->data->dev_private;
924         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
925
926         PMD_INIT_FUNC_TRACE();
927
928         if (dpni == NULL) {
929                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
930                 return;
931         }
932
933         /* must remain on for all promiscuous */
934         if (dev->data->promiscuous == 1)
935                 return;
936
937         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
938         if (ret < 0)
939                 RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
940 }
941
942 static int
943 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
944 {
945         int ret;
946         struct dpaa2_dev_priv *priv = dev->data->dev_private;
947         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
948         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
949
950         PMD_INIT_FUNC_TRACE();
951
952         if (dpni == NULL) {
953                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
954                 return -EINVAL;
955         }
956
957         /* check that mtu is within the allowed range */
958         if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
959                 return -EINVAL;
960
961         if (frame_size > ETHER_MAX_LEN)
962                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
963         else
964                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
965
966         /* Set the Max Rx frame length as 'mtu' +
967          * Maximum Ethernet header length
968          */
969         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
970                                         mtu + ETH_VLAN_HLEN);
971         if (ret) {
972                 PMD_DRV_LOG(ERR, "setting the max frame length failed");
973                 return -1;
974         }
975         PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
976         return 0;
977 }
978
979 static int
980 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
981                        struct ether_addr *addr,
982                        __rte_unused uint32_t index,
983                        __rte_unused uint32_t pool)
984 {
985         int ret;
986         struct dpaa2_dev_priv *priv = dev->data->dev_private;
987         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
988
989         PMD_INIT_FUNC_TRACE();
990
991         if (dpni == NULL) {
992                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
993                 return -1;
994         }
995
996         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
997                                 priv->token, addr->addr_bytes);
998         if (ret)
999                 RTE_LOG(ERR, PMD,
1000                         "error: Adding the MAC ADDR failed: err = %d\n", ret);
1001         return ret;
1002 }
1003
1004 static void
1005 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1006                           uint32_t index)
1007 {
1008         int ret;
1009         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1010         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1011         struct rte_eth_dev_data *data = dev->data;
1012         struct ether_addr *macaddr;
1013
1014         PMD_INIT_FUNC_TRACE();
1015
1016         macaddr = &data->mac_addrs[index];
1017
1018         if (dpni == NULL) {
1019                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1020                 return;
1021         }
1022
1023         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1024                                    priv->token, macaddr->addr_bytes);
1025         if (ret)
1026                 RTE_LOG(ERR, PMD,
1027                         "error: Removing the MAC ADDR failed: err = %d\n", ret);
1028 }
1029
1030 static void
1031 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1032                        struct ether_addr *addr)
1033 {
1034         int ret;
1035         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1036         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1037
1038         PMD_INIT_FUNC_TRACE();
1039
1040         if (dpni == NULL) {
1041                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1042                 return;
1043         }
1044
1045         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1046                                         priv->token, addr->addr_bytes);
1047
1048         if (ret)
1049                 RTE_LOG(ERR, PMD,
1050                         "error: Setting the MAC ADDR failed %d\n", ret);
1051 }
1052 static
1053 void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1054                          struct rte_eth_stats *stats)
1055 {
1056         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1057         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1058         int32_t  retcode;
1059         uint8_t page0 = 0, page1 = 1, page2 = 2;
1060         union dpni_statistics value;
1061
1062         memset(&value, 0, sizeof(union dpni_statistics));
1063
1064         PMD_INIT_FUNC_TRACE();
1065
1066         if (!dpni) {
1067                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1068                 return;
1069         }
1070
1071         if (!stats) {
1072                 RTE_LOG(ERR, PMD, "stats is NULL\n");
1073                 return;
1074         }
1075
1076         /*Get Counters from page_0*/
1077         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1078                                       page0, 0, &value);
1079         if (retcode)
1080                 goto err;
1081
1082         stats->ipackets = value.page_0.ingress_all_frames;
1083         stats->ibytes = value.page_0.ingress_all_bytes;
1084
1085         /*Get Counters from page_1*/
1086         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1087                                       page1, 0, &value);
1088         if (retcode)
1089                 goto err;
1090
1091         stats->opackets = value.page_1.egress_all_frames;
1092         stats->obytes = value.page_1.egress_all_bytes;
1093
1094         /*Get Counters from page_2*/
1095         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1096                                       page2, 0, &value);
1097         if (retcode)
1098                 goto err;
1099
1100         /* Ingress drop frame count due to configured rules */
1101         stats->ierrors = value.page_2.ingress_filtered_frames;
1102         /* Ingress drop frame count due to error */
1103         stats->ierrors += value.page_2.ingress_discarded_frames;
1104
1105         stats->oerrors = value.page_2.egress_discarded_frames;
1106         stats->imissed = value.page_2.ingress_nobuffer_discards;
1107
1108         return;
1109
1110 err:
1111         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
1112         return;
1113 };
1114
1115 static int
1116 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1117                      unsigned int n)
1118 {
1119         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1120         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1121         int32_t  retcode;
1122         union dpni_statistics value[3] = {};
1123         unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1124
1125         if (xstats == NULL)
1126                 return 0;
1127
1128         if (n < num)
1129                 return num;
1130
1131         /* Get Counters from page_0*/
1132         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1133                                       0, 0, &value[0]);
1134         if (retcode)
1135                 goto err;
1136
1137         /* Get Counters from page_1*/
1138         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1139                                       1, 0, &value[1]);
1140         if (retcode)
1141                 goto err;
1142
1143         /* Get Counters from page_2*/
1144         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1145                                       2, 0, &value[2]);
1146         if (retcode)
1147                 goto err;
1148
1149         for (i = 0; i < num; i++) {
1150                 xstats[i].id = i;
1151                 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1152                         raw.counter[dpaa2_xstats_strings[i].stats_id];
1153         }
1154         return i;
1155 err:
1156         RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
1157         return retcode;
1158 }
1159
1160 static int
1161 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1162                        struct rte_eth_xstat_name *xstats_names,
1163                        __rte_unused unsigned int limit)
1164 {
1165         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1166
1167         if (xstats_names != NULL)
1168                 for (i = 0; i < stat_cnt; i++)
1169                         snprintf(xstats_names[i].name,
1170                                  sizeof(xstats_names[i].name),
1171                                  "%s",
1172                                  dpaa2_xstats_strings[i].name);
1173
1174         return stat_cnt;
1175 }
1176
1177 static int
1178 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1179                        uint64_t *values, unsigned int n)
1180 {
1181         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1182         uint64_t values_copy[stat_cnt];
1183
1184         if (!ids) {
1185                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1186                 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1187                 int32_t  retcode;
1188                 union dpni_statistics value[3] = {};
1189
1190                 if (n < stat_cnt)
1191                         return stat_cnt;
1192
1193                 if (!values)
1194                         return 0;
1195
1196                 /* Get Counters from page_0*/
1197                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1198                                               0, 0, &value[0]);
1199                 if (retcode)
1200                         return 0;
1201
1202                 /* Get Counters from page_1*/
1203                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1204                                               1, 0, &value[1]);
1205                 if (retcode)
1206                         return 0;
1207
1208                 /* Get Counters from page_2*/
1209                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1210                                               2, 0, &value[2]);
1211                 if (retcode)
1212                         return 0;
1213
1214                 for (i = 0; i < stat_cnt; i++) {
1215                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1216                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1217                 }
1218                 return stat_cnt;
1219         }
1220
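        /* ids were supplied: fetch the full counter set once and copy out
         * only the requested entries.
         */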
1221         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1222
1223         for (i = 0; i < n; i++) {
1224                 if (ids[i] >= stat_cnt) {
1225                         PMD_INIT_LOG(ERR, "id value isn't valid");
1226                         return -1;
1227                 }
1228                 values[i] = values_copy[ids[i]];
1229         }
1230         return n;
1231 }
1232
1233 static int
1234 dpaa2_xstats_get_names_by_id(
1235         struct rte_eth_dev *dev,
1236         struct rte_eth_xstat_name *xstats_names,
1237         const uint64_t *ids,
1238         unsigned int limit)
1239 {
1240         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1241         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1242
1243         if (!ids)
1244                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1245
1246         dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1247
1248         for (i = 0; i < limit; i++) {
1249                 if (ids[i] >= stat_cnt) {
1250                         PMD_INIT_LOG(ERR, "id value isn't valid");
1251                         return -1;
1252                 }
1253                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1254         }
1255         return limit;
1256 }
1257
1258 static void
1259 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1260 {
1261         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1262         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1263         int32_t  retcode;
1264
1265         PMD_INIT_FUNC_TRACE();
1266
1267         if (dpni == NULL) {
1268                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1269                 return;
1270         }
1271
1272         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1273         if (retcode)
1274                 goto error;
1275
1276         return;
1277
1278 error:
1279         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
1280         return;
1281 };
1282
1283 /* return 0 means link status changed, -1 means not changed */
1284 static int
1285 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1286                         int wait_to_complete __rte_unused)
1287 {
1288         int ret;
1289         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1290         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1291         struct rte_eth_link link, old;
1292         struct dpni_link_state state = {0};
1293
1294         if (dpni == NULL) {
1295                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1296                 return 0;
1297         }
1298         memset(&old, 0, sizeof(old));
1299         dpaa2_dev_atomic_read_link_status(dev, &old);
1300
1301         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1302         if (ret < 0) {
1303                 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
1304                 return -1;
1305         }
1306
1307         if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
1308                 RTE_LOG(DEBUG, PMD, "No change in status\n");
1309                 return -1;
1310         }
1311
1312         memset(&link, 0, sizeof(struct rte_eth_link));
1313         link.link_status = state.up;
1314         link.link_speed = state.rate;
1315
1316         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1317                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1318         else
1319                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1320
1321         dpaa2_dev_atomic_write_link_status(dev, &link);
1322
1323         if (link.link_status)
1324                 PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
1325         else
1326                 PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
1327         return 0;
1328 }
1329
1330 /**
1331  * Toggle the DPNI to enable, if not already enabled.
1332  * This is not strictly PHY up/down - it is more of logical toggling.
1333  */
1334 static int
1335 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1336 {
1337         int ret = -EINVAL;
1338         struct dpaa2_dev_priv *priv;
1339         struct fsl_mc_io *dpni;
1340         int en = 0;
1341         struct dpni_link_state state = {0};
1342
1343         priv = dev->data->dev_private;
1344         dpni = (struct fsl_mc_io *)priv->hw;
1345
1346         if (dpni == NULL) {
1347                 RTE_LOG(ERR, PMD, "DPNI is NULL\n");
1348                 return ret;
1349         }
1350
1351         /* Check if DPNI is currently enabled */
1352         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1353         if (ret) {
1354                 /* Unable to obtain dpni status; Not continuing */
1355                 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1356                 return -EINVAL;
1357         }
1358
1359         /* Enable link if not already enabled */
1360         if (!en) {
1361                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1362                 if (ret) {
1363                         PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1364                         return -EINVAL;
1365                 }
1366         }
1367         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1368         if (ret < 0) {
1369                 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
1370                 return -1;
1371         }
1372
1373         /* changing tx burst function to start enqueues */
1374         dev->tx_pkt_burst = dpaa2_dev_tx;
1375         dev->data->dev_link.link_status = state.up;
1376
1377         if (state.up)
1378                 PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
1379                             dev->data->port_id);
1380         else
1381                 PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
1382         return ret;
1383 }
1384
1385 /**
1386  * Toggle the DPNI to disable, if not already disabled.
1387  * This is not strictly PHY up/down - it is more of logical toggling.
1388  */
1389 static int
1390 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1391 {
1392         int ret = -EINVAL;
1393         struct dpaa2_dev_priv *priv;
1394         struct fsl_mc_io *dpni;
1395         int dpni_enabled = 0;
1396         int retries = 10;
1397
1398         PMD_INIT_FUNC_TRACE();
1399
1400         priv = dev->data->dev_private;
1401         dpni = (struct fsl_mc_io *)priv->hw;
1402
1403         if (dpni == NULL) {
1404                 RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
1405                 return ret;
1406         }
1407
1408         /* changing tx burst function to avoid any more enqueues */
1409         dev->tx_pkt_burst = dummy_dev_tx;
1410
1411         /* Loop while dpni_disable() attempts to drain the egress FQs
1412          * and confirm them back to us.
1413          */
1414         do {
1415                 ret = dpni_disable(dpni, 0, priv->token);
1416                 if (ret) {
1417                         PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
1418                         return ret;
1419                 }
1420                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1421                 if (ret) {
1422                         PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
1423                         return ret;
1424                 }
1425                 if (dpni_enabled)
1426                         /* Allow the MC some slack */
1427                         rte_delay_us(100 * 1000);
1428         } while (dpni_enabled && --retries);
1429
1430         if (!retries) {
1431                 PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
1432                 /* todo- we may have to manually cleanup queues.
1433                  */
1434         } else {
1435                 PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
1436                             dev->data->port_id);
1437         }
1438
1439         dev->data->dev_link.link_status = 0;
1440
1441         return ret;
1442 }
1443
1444 static int
1445 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1446 {
1447         int ret = -EINVAL;
1448         struct dpaa2_dev_priv *priv;
1449         struct fsl_mc_io *dpni;
1450         struct dpni_link_state state = {0};
1451
1452         PMD_INIT_FUNC_TRACE();
1453
1454         priv = dev->data->dev_private;
1455         dpni = (struct fsl_mc_io *)priv->hw;
1456
1457         if (dpni == NULL || fc_conf == NULL) {
1458                 RTE_LOG(ERR, PMD, "device not configured\n");
1459                 return ret;
1460         }
1461
1462         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1463         if (ret) {
1464                 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
1465                 return ret;
1466         }
1467
1468         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1469         if (state.options & DPNI_LINK_OPT_PAUSE) {
1470                 /* DPNI_LINK_OPT_PAUSE set
1471                  *  if ASYM_PAUSE not set,
1472                  *      RX Side flow control (handle received Pause frame)
1473                  *      TX side flow control (send Pause frame)
1474                  *  if ASYM_PAUSE set,
1475                  *      RX Side flow control (handle received Pause frame)
1476                  *      No TX side flow control (send Pause frame disabled)
1477                  */
1478                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1479                         fc_conf->mode = RTE_FC_FULL;
1480                 else
1481                         fc_conf->mode = RTE_FC_RX_PAUSE;
1482         } else {
1483                 /* DPNI_LINK_OPT_PAUSE not set
1484                  *  if ASYM_PAUSE set,
1485                  *      TX side flow control (send Pause frame)
1486                  *      No RX side flow control (No action on pause frame rx)
1487                  *  if ASYM_PAUSE not set,
1488                  *      Flow control disabled
1489                  */
1490                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1491                         fc_conf->mode = RTE_FC_TX_PAUSE;
1492                 else
1493                         fc_conf->mode = RTE_FC_NONE;
1494         }
1495
1496         return ret;
1497 }
1498
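/*
 * Usage sketch (hypothetical application code): reading the current flow
 * control mode through the generic ethdev API. rte_eth_dev_flow_ctrl_get()
 * lands in dpaa2_flow_ctrl_get() above, where the DPNI_LINK_OPT_PAUSE /
 * DPNI_LINK_OPT_ASYM_PAUSE combination is decoded into an RTE_FC_* mode.
 *
 *    struct rte_eth_fc_conf fc_conf;
 *
 *    memset(&fc_conf, 0, sizeof(fc_conf));
 *    if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0 &&
 *        fc_conf.mode == RTE_FC_FULL)
 *            printf("pause enabled in both directions\n");
 */
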
1499 static int
1500 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1501 {
1502         int ret = -EINVAL;
1503         struct dpaa2_dev_priv *priv;
1504         struct fsl_mc_io *dpni;
1505         struct dpni_link_state state = {0};
1506         struct dpni_link_cfg cfg = {0};
1507
1508         PMD_INIT_FUNC_TRACE();
1509
1510         priv = dev->data->dev_private;
1511         dpni = (struct fsl_mc_io *)priv->hw;
1512
1513         if (dpni == NULL) {
1514                 RTE_LOG(ERR, PMD, "dpni is NULL\n");
1515                 return ret;
1516         }
1517
1518         /* It is necessary to obtain the current state before setting fc_conf
1519          * as MC would return error in case rate, autoneg or duplex values are
1520          * different.
1521          */
1522         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1523         if (ret) {
1524                 RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
1525                 return -1;
1526         }
1527
1528         /* Disable link before setting configuration */
1529         dpaa2_dev_set_link_down(dev);
1530
1531         /* Based on fc_conf, update cfg */
1532         cfg.rate = state.rate;
1533         cfg.options = state.options;
1534
1535         /* update cfg with fc_conf */
1536         switch (fc_conf->mode) {
1537         case RTE_FC_FULL:
1538                 /* Full flow control;
1539                  * OPT_PAUSE set, ASYM_PAUSE not set
1540                  */
1541                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1542                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1543                 break;
1544         case RTE_FC_TX_PAUSE:
1545                 /* Enable TX flow control
1546                  * OPT_PAUSE not set;
1547                  * ASYM_PAUSE set;
1548                  */
1549                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1550                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1551                 break;
1552         case RTE_FC_RX_PAUSE:
1553                 /* Enable RX flow control
1554                  * OPT_PAUSE set
1555                  * ASYM_PAUSE set
1556                  */
1557                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1558                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1559                 break;
1560         case RTE_FC_NONE:
1561                 /* Disable Flow control
1562                  * OPT_PAUSE not set
1563                  * ASYM_PAUSE not set
1564                  */
1565                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1566                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1567                 break;
1568         default:
1569                 RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
1570                         fc_conf->mode);
1571                 return -1;
1572         }
1573
1574         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1575         if (ret)
1576                 RTE_LOG(ERR, PMD,
1577                         "Unable to set Link configuration (err=%d)\n",
1578                         ret);
1579
1580         /* Enable link */
1581         dpaa2_dev_set_link_up(dev);
1582
1583         return ret;
1584 }
1585
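/*
 * Usage sketch (hypothetical application code): requesting symmetric pause.
 * dpaa2_flow_ctrl_set() above maps RTE_FC_FULL to DPNI_LINK_OPT_PAUSE with
 * DPNI_LINK_OPT_ASYM_PAUSE cleared; only fc_conf.mode is consumed, the
 * water-mark and pause_time fields are ignored by this PMD.
 *
 *    struct rte_eth_fc_conf fc_conf;
 *
 *    memset(&fc_conf, 0, sizeof(fc_conf));
 *    fc_conf.mode = RTE_FC_FULL;
 *    if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *            printf("failed to enable flow control on port %u\n", port_id);
 */
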
1586 static int
1587 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1588                           struct rte_eth_rss_conf *rss_conf)
1589 {
1590         struct rte_eth_dev_data *data = dev->data;
1591         struct rte_eth_conf *eth_conf = &data->dev_conf;
1592         int ret;
1593
1594         PMD_INIT_FUNC_TRACE();
1595
1596         if (rss_conf->rss_hf) {
1597                 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1598                 if (ret) {
1599                         PMD_INIT_LOG(ERR, "unable to set flow dist");
1600                         return ret;
1601                 }
1602         } else {
1603                 ret = dpaa2_remove_flow_dist(dev, 0);
1604                 if (ret) {
1605                         PMD_INIT_LOG(ERR, "unable to remove flow dist");
1606                         return ret;
1607                 }
1608         }
1609         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1610         return 0;
1611 }
1612
1613 static int
1614 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1615                             struct rte_eth_rss_conf *rss_conf)
1616 {
1617         struct rte_eth_dev_data *data = dev->data;
1618         struct rte_eth_conf *eth_conf = &data->dev_conf;
1619
1620         /* dpaa2 does not support rss_key, so length should be 0 */
1621         rss_conf->rss_key_len = 0;
1622         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1623         return 0;
1624 }
1625
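/*
 * Usage sketch (hypothetical application code): enabling RSS on IP headers
 * and reading the configuration back. A user-supplied hash key is not
 * supported by this PMD, so rss_key is left NULL and rss_key_len is reported
 * as 0 by dpaa2_dev_rss_hash_conf_get().
 *
 *    struct rte_eth_rss_conf rss_conf = {
 *            .rss_key = NULL,
 *            .rss_hf = ETH_RSS_IP,
 *    };
 *
 *    if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) == 0)
 *            rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
 */
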
1626 static struct eth_dev_ops dpaa2_ethdev_ops = {
1627         .dev_configure    = dpaa2_eth_dev_configure,
1628         .dev_start            = dpaa2_dev_start,
1629         .dev_stop             = dpaa2_dev_stop,
1630         .dev_close            = dpaa2_dev_close,
1631         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
1632         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
1633         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
1634         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
1635         .dev_set_link_up      = dpaa2_dev_set_link_up,
1636         .dev_set_link_down    = dpaa2_dev_set_link_down,
1637         .link_update       = dpaa2_dev_link_update,
1638         .stats_get             = dpaa2_dev_stats_get,
1639         .xstats_get            = dpaa2_dev_xstats_get,
1640         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
1641         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
1642         .xstats_get_names      = dpaa2_xstats_get_names,
1643         .stats_reset       = dpaa2_dev_stats_reset,
1644         .xstats_reset         = dpaa2_dev_stats_reset,
1645         .fw_version_get    = dpaa2_fw_version_get,
1646         .dev_infos_get     = dpaa2_dev_info_get,
1647         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
1648         .mtu_set           = dpaa2_dev_mtu_set,
1649         .vlan_filter_set      = dpaa2_vlan_filter_set,
1650         .vlan_offload_set     = dpaa2_vlan_offload_set,
1651         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
1652         .rx_queue_release  = dpaa2_dev_rx_queue_release,
1653         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
1654         .tx_queue_release  = dpaa2_dev_tx_queue_release,
1655         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
1656         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
1657         .mac_addr_add         = dpaa2_dev_add_mac_addr,
1658         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
1659         .mac_addr_set         = dpaa2_dev_set_mac_addr,
1660         .rss_hash_update      = dpaa2_dev_rss_hash_update,
1661         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
1662 };
1663
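/*
 * These callbacks are not invoked directly by applications; the generic
 * ethdev layer dispatches to them. A minimal sketch (hypothetical
 * application code): rte_eth_promiscuous_enable() reaches
 * dpaa2_dev_promiscuous_enable(), rte_eth_dev_set_mtu() reaches
 * dpaa2_dev_mtu_set(), and rte_eth_link_get_nowait() reaches
 * dpaa2_dev_link_update() with wait_to_complete = 0.
 *
 *    struct rte_eth_link link;
 *
 *    rte_eth_promiscuous_enable(port_id);
 *    rte_eth_dev_set_mtu(port_id, 1500);
 *    rte_eth_link_get_nowait(port_id, &link);
 */
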
1664 static int
1665 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
1666 {
1667         struct rte_device *dev = eth_dev->device;
1668         struct rte_dpaa2_device *dpaa2_dev;
1669         struct fsl_mc_io *dpni_dev;
1670         struct dpni_attr attr;
1671         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
1672         struct dpni_buffer_layout layout;
1673         int ret, hw_id;
1674
1675         PMD_INIT_FUNC_TRACE();
1676
1677         /* For secondary processes, the primary has done all the work */
1678         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1679                 return 0;
1680
1681         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
1682
1683         hw_id = dpaa2_dev->object_id;
1684
1685         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
1686         if (!dpni_dev) {
1687                 PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
1688                 return -1;
1689         }
1690
1691         dpni_dev->regs = rte_mcp_ptr_list[0];
1692         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
1693         if (ret) {
1694                 PMD_INIT_LOG(ERR,
1695                              "Failure in opening dpni@%d with err code %d\n",
1696                              hw_id, ret);
1697                 rte_free(dpni_dev);
1698                 return -1;
1699         }
1700
1701         /* Clean the device first */
1702         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
1703         if (ret) {
1704                 PMD_INIT_LOG(ERR,
1705                              "Failure cleaning dpni@%d with err code %d\n",
1706                              hw_id, ret);
1707                 goto init_err;
1708         }
1709
1710         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
1711         if (ret) {
1712                 PMD_INIT_LOG(ERR,
1713                              "Failure in get dpni@%d attribute, err code %d\n",
1714                              hw_id, ret);
1715                 goto init_err;
1716         }
1717
1718         priv->num_rx_tc = attr.num_rx_tcs;
1719
1720         /* Set "num_rx_queues" to the number of queues in the first TC,
1721          * as only one TC is supported on the Rx side. Once multiple TCs
1722          * are used for Rx processing, this will be changed or removed.
1723          */
1724         priv->nb_rx_queues = attr.num_queues;
1725
1726         /* Use the number of Tx TCs as the number of Tx queues */
1727         priv->nb_tx_queues = attr.num_tx_tcs;
1728
1729         PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
1730                     priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);
1731
1732         priv->hw = dpni_dev;
1733         priv->hw_id = hw_id;
1734         priv->options = attr.options;
1735         priv->max_mac_filters = attr.mac_filter_entries;
1736         priv->max_vlan_filters = attr.vlan_filter_entries;
1737         priv->flags = 0;
1738
1739         /* Allocate memory for hardware structure for queues */
1740         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
1741         if (ret) {
1742                 PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queues failed\n");
1743                 goto init_err;
1744         }
1745
1746         /* Allocate memory for storing MAC addresses */
1747         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
1748                 ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
1749         if (eth_dev->data->mac_addrs == NULL) {
1750                 PMD_INIT_LOG(ERR,
1751                    "Failed to allocate %d bytes needed to store MAC addresses",
1752                              ETHER_ADDR_LEN * attr.mac_filter_entries);
1753                 ret = -ENOMEM;
1754                 goto init_err;
1755         }
1756
1757         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
1758                                         priv->token,
1759                         (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
1760         if (ret) {
1761                 PMD_INIT_LOG(ERR, "DPNI get mac address failed: Err Code = %d\n",
1762                              ret);
1763                 goto init_err;
1764         }
1765
1766         /* ... tx buffer layout ... */
1767         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
1768         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
1769         layout.pass_frame_status = 1;
1770         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
1771                                      DPNI_QUEUE_TX, &layout);
1772         if (ret) {
1773                 PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
1774                              ret);
1775                 goto init_err;
1776         }
1777
1778         /* ... tx-conf and error buffer layout ... */
1779         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
1780         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
1781         layout.pass_frame_status = 1;
1782         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
1783                                      DPNI_QUEUE_TX_CONFIRM, &layout);
1784         if (ret) {
1785                 PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
1786                              ret);
1787                 goto init_err;
1788         }
1789
1790         eth_dev->dev_ops = &dpaa2_ethdev_ops;
1791         eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1792
1793         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
1794         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
1795         rte_fslmc_vfio_dmamap();
1796
1797         return 0;
1798 init_err:
1799         dpaa2_dev_uninit(eth_dev);
1800         return ret;
1801 }
1802
1803 static int
1804 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
1805 {
1806         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
1807         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1808         int i, ret;
1809         struct dpaa2_queue *dpaa2_q;
1810
1811         PMD_INIT_FUNC_TRACE();
1812
1813         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1814                 return 0;
1815
1816         if (!dpni) {
1817                 PMD_INIT_LOG(WARNING, "Already closed or not started");
1818                 return -1;
1819         }
1820
1821         dpaa2_dev_close(eth_dev);
1822
1823         if (priv->rx_vq[0]) {
1824                 /* cleaning up queue storage */
1825                 for (i = 0; i < priv->nb_rx_queues; i++) {
1826                         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1827                         if (dpaa2_q->q_storage)
1828                                 rte_free(dpaa2_q->q_storage);
1829                 }
1830                 /* Free all the queue memory */
1831                 rte_free(priv->rx_vq[0]);
1832                 priv->rx_vq[0] = NULL;
1833         }
1834
1835         /* free memory for storing MAC addresses */
1836         if (eth_dev->data->mac_addrs) {
1837                 rte_free(eth_dev->data->mac_addrs);
1838                 eth_dev->data->mac_addrs = NULL;
1839         }
1840
1841         /* Close the device at the underlying layer */
1842         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
1843         if (ret) {
1844                 PMD_INIT_LOG(ERR,
1845                              "Failure closing dpni device with err code %d\n",
1846                              ret);
1847         }
1848
1849         /* Free the allocated memory for ethernet private data and dpni */
1850         priv->hw = NULL;
1851         rte_free(dpni);
1852
1853         eth_dev->dev_ops = NULL;
1854         eth_dev->rx_pkt_burst = NULL;
1855         eth_dev->tx_pkt_burst = NULL;
1856
1857         return 0;
1858 }
1859
1860 static int
1861 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
1862                 struct rte_dpaa2_device *dpaa2_dev)
1863 {
1864         struct rte_eth_dev *eth_dev;
1865         int diag;
1866
1867         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1868                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
1869                 if (!eth_dev)
1870                         return -ENODEV;
1871                 eth_dev->data->dev_private = rte_zmalloc(
1872                                                 "ethdev private structure",
1873                                                 sizeof(struct dpaa2_dev_priv),
1874                                                 RTE_CACHE_LINE_SIZE);
1875                 if (eth_dev->data->dev_private == NULL) {
1876                         PMD_INIT_LOG(CRIT, "Cannot allocate memory for"
1877                                      " private port data\n");
1878                         rte_eth_dev_release_port(eth_dev);
1879                         return -ENOMEM;
1880                 }
1881         } else {
1882                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
1883                 if (!eth_dev)
1884                         return -ENODEV;
1885         }
1886
1887         eth_dev->device = &dpaa2_dev->device;
1888         eth_dev->device->driver = &dpaa2_drv->driver;
1889
1890         dpaa2_dev->eth_dev = eth_dev;
1891         eth_dev->data->rx_mbuf_alloc_failed = 0;
1892
1893         /* Invoke PMD device initialization function */
1894         diag = dpaa2_dev_init(eth_dev);
1895         if (diag == 0)
1896                 return 0;
1897
1898         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1899                 rte_free(eth_dev->data->dev_private);
1900         rte_eth_dev_release_port(eth_dev);
1901         return diag;
1902 }
1903
1904 static int
1905 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
1906 {
1907         struct rte_eth_dev *eth_dev;
1908
1909         eth_dev = dpaa2_dev->eth_dev;
1910         dpaa2_dev_uninit(eth_dev);
1911
1912         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1913                 rte_free(eth_dev->data->dev_private);
1914         rte_eth_dev_release_port(eth_dev);
1915
1916         return 0;
1917 }
1918
1919 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
1920         .drv_type = DPAA2_ETH,
1921         .probe = rte_dpaa2_probe,
1922         .remove = rte_dpaa2_remove,
1923 };
1924
1925 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);