drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
21 #include "rte_dpaa2_mempool.h"
22
23 #include "dpaa2_pmd_logs.h"
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_mempool.h>
27 #include <dpaa2_hw_dpio.h>
28 #include <mc/fsl_dpmng.h>
29 #include "dpaa2_ethdev.h"
30 #include "dpaa2_sparser.h"
31 #include <fsl_qbman_debug.h>
32
#define DRIVER_LOOPBACK_MODE "drv_loopback"
#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
#define DRIVER_TX_CONF "drv_tx_conf"
#define DRIVER_ERROR_QUEUE  "drv_err_queue"
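/*
 * The keys above are matched against devargs via rte_kvargs. As a hedged
 * example (the exact dpni object name depends on the fslmc setup), loopback
 * mode would typically be requested as:
 *   dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 */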
#define CHECK_INTERVAL         100  /* 100ms */
#define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
                RTE_ETH_RX_OFFLOAD_CHECKSUM |
                RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
                RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
                RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
                RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
                RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
                RTE_ETH_RX_OFFLOAD_TIMESTAMP;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
                RTE_ETH_RX_OFFLOAD_RSS_HASH |
                RTE_ETH_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
                RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
                RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
                RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
                RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
                RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
                RTE_ETH_TX_OFFLOAD_MULTI_SEGS;

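/*
 * Note on the "nodis" sets: these offloads are always active in hardware
 * and cannot be switched off per port. dpaa2_eth_dev_configure() only logs
 * an informational message when the application did not request them; it
 * does not (and cannot) disable them.
 */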
/* enable timestamp in mbuf */
bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
uint64_t dpaa2_timestamp_rx_dynflag;
int dpaa2_timestamp_dynfield_offset = -1;

/* Enable error queue */
bool dpaa2_enable_err_queue;

#define MAX_NB_RX_DESC          11264
int total_nb_rx_desc;

struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
        uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
        {"ingress_multicast_frames", 0, 2},
        {"ingress_multicast_bytes", 0, 3},
        {"ingress_broadcast_frames", 0, 4},
        {"ingress_broadcast_bytes", 0, 5},
        {"egress_multicast_frames", 1, 2},
        {"egress_multicast_bytes", 1, 3},
        {"egress_broadcast_frames", 1, 4},
        {"egress_broadcast_bytes", 1, 5},
        {"ingress_filtered_frames", 2, 0},
        {"ingress_discarded_frames", 2, 1},
        {"ingress_nobuffer_discards", 2, 2},
        {"egress_discarded_frames", 2, 3},
        {"egress_confirmed_frames", 2, 4},
        {"cgr_reject_frames", 4, 0},
        {"cgr_reject_bytes", 4, 1},
};

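/*
 * Each xstat maps to a (page_id, stats_id) pair in the DPNI statistics
 * pages exposed by the MC firmware; the xstats handlers fetch a page with
 * dpni_get_statistics() and pick the counter at stats_id. The mapping
 * above mirrors the page layout documented for the DPNI object.
 */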
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        int ret;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = dev->process_private;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return -1;
        }

        if (on)
                ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
                                       vlan_id, 0, 0, 0);
        else
                ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
                                          priv->token, vlan_id);

        if (ret < 0)
                DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
                              ret, vlan_id, priv->hw_id);

        return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = dev->process_private;
        int ret = 0;

        PMD_INIT_FUNC_TRACE();

        if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                /* VLAN Filter not available */
                if (!priv->max_vlan_filters) {
                        DPAA2_PMD_INFO("VLAN filter not available");
                        return -ENOTSUP;
                }

                if (dev->data->dev_conf.rxmode.offloads &
                        RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, false);
                if (ret < 0)
                        DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
        }

        return ret;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
                      enum rte_vlan_type vlan_type __rte_unused,
                      uint16_t tpid)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = dev->process_private;
        int ret = -ENOTSUP;

        PMD_INIT_FUNC_TRACE();

        /* nothing to be done for standard vlan tpids */
        if (tpid == 0x8100 || tpid == 0x88A8)
                return 0;

        ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
                                   priv->token, tpid);
        if (ret < 0)
                DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
        /* if already configured tpids, remove them first */
        if (ret == -EBUSY) {
                struct dpni_custom_tpid_cfg tpid_list = {0};

                ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
                                   priv->token, &tpid_list);
                if (ret < 0)
                        goto fail;
                ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
                                   priv->token, tpid_list.tpid1);
                if (ret < 0)
                        goto fail;
                ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
                                           priv->token, tpid);
        }
fail:
        return ret;
}

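/*
 * The DPNI supports only a small number of custom TPIDs. On -EBUSY the
 * code above evicts the first configured TPID (tpid_list.tpid1) and
 * retries; note that tpid2, if configured, is left in place.
 */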
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
                     char *fw_version,
                     size_t fw_size)
{
        int ret;
        struct fsl_mc_io *dpni = dev->process_private;
        struct mc_soc_version mc_plat_info = {0};
        struct mc_version mc_ver_info = {0};

        PMD_INIT_FUNC_TRACE();

        if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
                DPAA2_PMD_WARN("\tmc_get_soc_version failed");

        if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
                DPAA2_PMD_WARN("\tmc_get_version failed");

        ret = snprintf(fw_version, fw_size,
                       "%x-%d.%d.%d",
                       mc_plat_info.svr,
                       mc_ver_info.major,
                       mc_ver_info.minor,
                       mc_ver_info.revision);
        if (ret < 0)
                return -EINVAL;

        ret += 1; /* add the size of '\0' */
        if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
}

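/*
 * Per the ethdev fw_version_get convention, the function above returns 0
 * on success and, when the caller's buffer is too small, the number of
 * bytes (including the terminating '\0') that would have been required.
 */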
static int
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

        dev_info->max_mac_addrs = priv->max_mac_filters;
        dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
        dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
        dev_info->rx_offload_capa = dev_rx_offloads_sup |
                                        dev_rx_offloads_nodis;
        dev_info->tx_offload_capa = dev_tx_offloads_sup |
                                        dev_tx_offloads_nodis;
        dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
                        RTE_ETH_LINK_SPEED_2_5G |
                        RTE_ETH_LINK_SPEED_10G;
        dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
        dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;

        dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
        /* use the same size as Rx for best performance */
        dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;

        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
        dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;

        if (dpaa2_svr_family == SVR_LX2160A) {
                dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
                                RTE_ETH_LINK_SPEED_40G |
                                RTE_ETH_LINK_SPEED_50G |
                                RTE_ETH_LINK_SPEED_100G;
        }

        return 0;
}

static int
dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
                        __rte_unused uint16_t queue_id,
                        struct rte_eth_burst_mode *mode)
{
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        int ret = -EINVAL;
        unsigned int i;
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                        {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
                        {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
                        {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
                        {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
                        {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
                        {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
                        {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                        {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
                        {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
        };

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
                        snprintf(mode->info, sizeof(mode->info), "%s",
                                rx_offload_map[i].output);
                        ret = 0;
                        break;
                }
        }
        return ret;
}

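/*
 * Both burst-mode helpers report only the first offload flag that matches
 * the configured mode: the loop breaks on the first hit, so mode->info
 * never lists more than one entry.
 */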
static int
dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
                        __rte_unused uint16_t queue_id,
                        struct rte_eth_burst_mode *mode)
{
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        int ret = -EINVAL;
        unsigned int i;
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                        {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                        {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
                        {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
                        {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
                        {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
                        {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
                        {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
                        {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
                        {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
        };

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
                        snprintf(mode->info, sizeof(mode->info), "%s",
                                tx_offload_map[i].output);
                        ret = 0;
                        break;
                }
        }
        return ret;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        uint16_t dist_idx;
        uint32_t vq_id;
        uint8_t num_rxqueue_per_tc;
        struct dpaa2_queue *mc_q, *mcq;
        uint32_t tot_queues;
        int i;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
        if (priv->flags & DPAA2_TX_CONF_ENABLE)
                tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
        else
                tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
        mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
                          RTE_CACHE_LINE_SIZE);
        if (!mc_q) {
                DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
                return -1;
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                mc_q->eth_data = dev->data;
                priv->rx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_q->q_storage = rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                if (!dpaa2_q->q_storage)
                        goto fail;

                memset(dpaa2_q->q_storage, 0,
                       sizeof(struct queue_storage_info_t));
                if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
                        goto fail;
        }

        if (dpaa2_enable_err_queue) {
                priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
                        sizeof(struct dpaa2_queue), 0);
                if (!priv->rx_err_vq)
                        goto fail;

                dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
                dpaa2_q->q_storage = rte_malloc("err_dq_storage",
                                        sizeof(struct queue_storage_info_t) *
                                        RTE_MAX_LCORE,
                                        RTE_CACHE_LINE_SIZE);
                if (!dpaa2_q->q_storage)
                        goto fail;

                memset(dpaa2_q->q_storage, 0,
                       sizeof(struct queue_storage_info_t));
                for (i = 0; i < RTE_MAX_LCORE; i++)
                        if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
                                goto fail;
        }

        for (i = 0; i < priv->nb_tx_queues; i++) {
                mc_q->eth_data = dev->data;
                mc_q->flow_id = 0xffff;
                priv->tx_vq[i] = mc_q++;
                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                dpaa2_q->cscn = rte_malloc(NULL,
                                           sizeof(struct qbman_result), 16);
                if (!dpaa2_q->cscn)
                        goto fail_tx;
        }

        if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                /* Setup Tx confirmation queues */
                for (i = 0; i < priv->nb_tx_queues; i++) {
                        mc_q->eth_data = dev->data;
                        mc_q->tc_index = i;
                        mc_q->flow_id = 0;
                        priv->tx_conf_vq[i] = mc_q++;
                        dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
                        dpaa2_q->q_storage =
                                rte_malloc("dq_storage",
                                        sizeof(struct queue_storage_info_t),
                                        RTE_CACHE_LINE_SIZE);
                        if (!dpaa2_q->q_storage)
                                goto fail_tx_conf;

                        memset(dpaa2_q->q_storage, 0,
                               sizeof(struct queue_storage_info_t));
                        if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
                                goto fail_tx_conf;
                }
        }

        vq_id = 0;
        for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
                mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
                mcq->tc_index = dist_idx / num_rxqueue_per_tc;
                mcq->flow_id = dist_idx % num_rxqueue_per_tc;
                vq_id++;
        }

        return 0;
fail_tx_conf:
        i -= 1;
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
                rte_free(dpaa2_q->q_storage);
                priv->tx_conf_vq[i--] = NULL;
        }
        i = priv->nb_tx_queues;
fail_tx:
        i -= 1;
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                rte_free(dpaa2_q->cscn);
                priv->tx_vq[i--] = NULL;
        }
        i = priv->nb_rx_queues;
fail:
        i -= 1;
        mc_q = priv->rx_vq[0];
        while (i >= 0) {
                dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                dpaa2_free_dq_storage(dpaa2_q->q_storage);
                rte_free(dpaa2_q->q_storage);
                priv->rx_vq[i--] = NULL;
        }

        if (dpaa2_enable_err_queue) {
                dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
                if (dpaa2_q->q_storage)
                        dpaa2_free_dq_storage(dpaa2_q->q_storage);
                rte_free(dpaa2_q->q_storage);
        }

        rte_free(mc_q);
        return -1;
}

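/*
 * All Rx, Tx and (optionally) Tx-confirmation queue structures live in the
 * single mc_q allocation above; priv->rx_vq[0] points at its base. The
 * fail_tx_conf/fail_tx/fail labels unwind in reverse order, falling through
 * from the deepest allocation back to freeing that one block.
 */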
static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        int i;

        PMD_INIT_FUNC_TRACE();

        /* Queue allocation base */
        if (priv->rx_vq[0]) {
                /* cleaning up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                        if (dpaa2_q->q_storage)
                                rte_free(dpaa2_q->q_storage);
                }
                /* cleanup tx queue cscn */
                for (i = 0; i < priv->nb_tx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
                        rte_free(dpaa2_q->cscn);
                }
                if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                        /* cleanup tx conf queue storage */
                        for (i = 0; i < priv->nb_tx_queues; i++) {
                                dpaa2_q = (struct dpaa2_queue *)
                                                priv->tx_conf_vq[i];
                                rte_free(dpaa2_q->q_storage);
                        }
                }
                /* free memory for all queues (Rx + Tx) */
                rte_free(priv->rx_vq[0]);
                priv->rx_vq[0] = NULL;
        }
}

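/*
 * Freeing priv->rx_vq[0] releases every queue structure at once, since
 * dpaa2_alloc_rx_tx_queues() carved all of them out of one rte_malloc()
 * block; only the per-queue storage and cscn buffers need separate frees.
 */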
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = dev->process_private;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint64_t tx_offloads = eth_conf->txmode.offloads;
        int rx_l3_csum_offload = false;
        int rx_l4_csum_offload = false;
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
        int ret, tc_index;
        uint32_t max_rx_pktlen;

        PMD_INIT_FUNC_TRACE();

        /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
                DPAA2_PMD_INFO(
                "Some of rx offloads enabled by default - requested 0x%" PRIx64
                " fixed are 0x%" PRIx64,
                rx_offloads, dev_rx_offloads_nodis);
        }

        /* Tx offloads which are enabled by default */
        if (dev_tx_offloads_nodis & ~tx_offloads) {
                DPAA2_PMD_INFO(
                "Some of tx offloads enabled by default - requested 0x%" PRIx64
                " fixed are 0x%" PRIx64,
                tx_offloads, dev_tx_offloads_nodis);
        }

        max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
                                RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
        if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
                ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
                        priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
                if (ret != 0) {
                        DPAA2_PMD_ERR("Unable to set mtu. check config");
                        return ret;
                }
                DPAA2_PMD_INFO("MTU configured for the device: %d",
                                dev->data->mtu);
        } else {
                return -1;
        }

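        /*
         * Worked example of the frame-length math above: for the default
         * MTU of 1500, max_rx_pktlen = 1500 + 14 (Ethernet header) +
         * 4 (CRC) + 4 (VLAN tag) = 1522, and the MC is programmed with
         * 1522 - 4 = 1518, since the CRC is excluded from the limit it
         * enforces.
         */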
        if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
                        ret = dpaa2_setup_flow_dist(dev,
                                        eth_conf->rx_adv_conf.rss_conf.rss_hf,
                                        tc_index);
                        if (ret) {
                                DPAA2_PMD_ERR(
                                        "Unable to set flow distribution on tc%d."
                                        " Check queue config", tc_index);
                                return ret;
                        }
                }
        }

        if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
                rx_l3_csum_offload = true;

        if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
                (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
                (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
                rx_l4_csum_offload = true;

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set RX l3 csum: Error = %d", ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set RX l4 csum: Error = %d", ret);
                return ret;
        }

#if !defined(RTE_LIBRTE_IEEE1588)
        if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
#endif
        {
                ret = rte_mbuf_dyn_rx_timestamp_register(
                                &dpaa2_timestamp_dynfield_offset,
                                &dpaa2_timestamp_rx_dynflag);
                if (ret != 0) {
                        DPAA2_PMD_ERR("Error to register timestamp field/flag");
                        return -rte_errno;
                }
                dpaa2_enable_ts[dev->data->port_id] = true;
        }

        if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;

        if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
                (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
                (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
                tx_l4_csum_offload = true;

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set TX l3 csum: Error = %d", ret);
                return ret;
        }

        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                               DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set TX l4 csum: Error = %d", ret);
                return ret;
        }

        /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
         * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
         * to 0 for LS2 in the hardware thus disabling data/annotation
         * stashing. For LX2 this is fixed in hardware and thus hash result and
         * parse results can be received in FD using this option.
         */
        if (dpaa2_svr_family == SVR_LX2160A) {
                ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
                                       DPNI_FLCTYPE_HASH, true);
                if (ret) {
                        DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
                        return ret;
                }
        }

        if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);

        if (eth_conf->lpbk_mode) {
                ret = dpaa2_dev_recycle_config(dev);
                if (ret) {
                        DPAA2_PMD_ERR("Error to configure %s to recycle port.",
                                dev->data->name);

                        return ret;
                }
        } else {
                /* User may disable loopback mode by calling
                 * "dev_configure" with lpbk_mode cleared.
                 * No matter whether the port was configured as a recycle
                 * port or not, recycle de-configure is called here. If the
                 * port is not a recycle port, the de-configure returns
                 * directly.
                 */
                ret = dpaa2_dev_recycle_deconfig(dev);
                if (ret) {
                        DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
                                dev->data->name);

                        return ret;
                }
        }

        dpaa2_tm_init(dev);

        return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mb_pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;
        uint8_t options = 0;
        uint8_t flow_id;
        uint32_t bpid;
        int i, ret;

        PMD_INIT_FUNC_TRACE();

        DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
                        dev, rx_queue_id, mb_pool, rx_conf);

        total_nb_rx_desc += nb_rx_desc;
        if (total_nb_rx_desc > MAX_NB_RX_DESC) {
                DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
                               MAX_NB_RX_DESC);
                DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
        }

        /* Rx deferred start is not supported */
        if (rx_conf->rx_deferred_start) {
                DPAA2_PMD_ERR("%p:Rx deferred start not supported",
                                (void *)dev);
                return -EINVAL;
        }

        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                        ret = rte_dpaa2_bpid_info_init(mb_pool);
                        if (ret)
                                return ret;
                }
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv, dpni,
                                rte_dpaa2_bpid_info[bpid].bp_list);
                if (ret)
                        return ret;
        }
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
        dpaa2_q->bp_array = rte_dpaa2_bpid_info;
        dpaa2_q->nb_desc = UINT16_MAX;
        dpaa2_q->offloads = rx_conf->offloads;

        /* Get the flow id from the given VQ id */
        flow_id = dpaa2_q->flow_id;
        memset(&cfg, 0, sizeof(struct dpni_queue));

        options = options | DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (size_t)(dpaa2_q);

        /* check if a private cgr is available */
        for (i = 0; i < priv->max_cgs; i++) {
                if (!priv->cgid_in_use[i]) {
                        priv->cgid_in_use[i] = 1;
                        break;
                }
        }

        if (i < priv->max_cgs) {
                options |= DPNI_QUEUE_OPT_SET_CGID;
                cfg.cgid = i;
                dpaa2_q->cgid = cfg.cgid;
        } else {
                dpaa2_q->cgid = 0xff;
        }

        /* if ls2088 or rev2 device, enable the stashing */

        if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* The last 6 bits select annotation, context and data
                 * stashing (in the order DS AS CS). Setting 01 01 00
                 * (0x14) enables 1 line of data and 1 line of annotation.
                 * For LX2, this setting should be 01 00 00 (0x10).
                 */
                if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
                        cfg.flc.value |= 0x10;
                else
                        cfg.flc.value |= 0x14;
        }
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
                return -1;
        }

        if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
                struct dpni_taildrop taildrop;

                taildrop.enable = 1;
                dpaa2_q->nb_desc = nb_rx_desc;
                /* A private CGR uses nb_rx_desc as the tail drop length.
                 * For the remaining cases the standard byte based tail
                 * drop is used. There is no HW restriction, but the number
                 * of CGRs is limited, hence this restriction is placed.
                 */
                if (dpaa2_q->cgid != 0xff) {
                        /* enabling per rx queue congestion control */
                        taildrop.threshold = nb_rx_desc;
                        taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
                        taildrop.oal = 0;
                        DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
                                        rx_queue_id);
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                                DPNI_CP_CONGESTION_GROUP,
                                                DPNI_QUEUE_RX,
                                                dpaa2_q->tc_index,
                                                dpaa2_q->cgid, &taildrop);
                } else {
                        /* enabling byte based tail drop on the queue */
                        taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
                        taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
                        taildrop.oal = CONG_RX_OAL;
                        DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
                                        rx_queue_id);
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                                DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                                dpaa2_q->tc_index, flow_id,
                                                &taildrop);
                }
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
                                      ret);
                        return -1;
                }
        } else { /* Disable tail Drop */
                struct dpni_taildrop taildrop = {0};
                DPAA2_PMD_INFO("Tail drop is disabled on queue");

                taildrop.enable = 0;
                if (dpaa2_q->cgid != 0xff) {
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index,
                                        dpaa2_q->cgid, &taildrop);
                } else {
                        ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index, flow_id, &taildrop);
                }
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
                                      ret);
                        return -1;
                }
        }

        dev->data->rx_queues[rx_queue_id] = dpaa2_q;
        return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t tx_queue_id,
                         uint16_t nb_tx_desc,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_txconf *tx_conf)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
                priv->tx_vq[tx_queue_id];
        struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
                priv->tx_conf_vq[tx_queue_id];
        struct fsl_mc_io *dpni = dev->process_private;
        struct dpni_queue tx_conf_cfg;
        struct dpni_queue tx_flow_cfg;
        uint8_t options = 0, flow_id;
        uint16_t channel_id;
        struct dpni_queue_id qid;
        uint32_t tc_id;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* Tx deferred start is not supported */
        if (tx_conf->tx_deferred_start) {
                DPAA2_PMD_ERR("%p:Tx deferred start not supported",
                                (void *)dev);
                return -EINVAL;
        }

        dpaa2_q->nb_desc = UINT16_MAX;
        dpaa2_q->offloads = tx_conf->offloads;

        /* Return if queue already configured */
        if (dpaa2_q->flow_id != 0xffff) {
                dev->data->tx_queues[tx_queue_id] = dpaa2_q;
                return 0;
        }

        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

        if (tx_queue_id == 0) {
                /* Set tx-conf and error configuration */
                if (priv->flags & DPAA2_TX_CONF_ENABLE)
                        ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                                                            priv->token,
                                                            DPNI_CONF_AFFINE);
                else
                        ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
                                                            priv->token,
                                                            DPNI_CONF_DISABLE);
                if (ret) {
                        DPAA2_PMD_ERR("Error in set tx conf mode settings: "
                                      "err=%d", ret);
                        return -1;
                }
        }

        tc_id = tx_queue_id % priv->num_tx_tc;
        channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
        flow_id = 0;

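        /*
         * Worked example of the mapping above: with num_tx_tc = 8 and
         * num_channels = 2, tx_queue_id 9 lands on tc_id 9 % 8 = 1 and
         * channel_id (9 / 8) % 2 = 1. The (channel_id << 8) | tc_id
         * encoding below packs both into the single "tc" parameter of
         * dpni_set_queue()/dpni_get_queue().
         */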
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
                        ((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in setting the tx flow: "
                        "tc_id=%d, flow=%d err=%d",
                        tc_id, flow_id, ret);
                return -1;
        }

        dpaa2_q->flow_id = flow_id;

        dpaa2_q->tc_index = tc_id;

        ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                             DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
                             dpaa2_q->flow_id, &tx_flow_cfg, &qid);
        if (ret) {
                DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
                return -1;
        }
        dpaa2_q->fqid = qid.fqid;

        if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
                struct dpni_congestion_notification_cfg cong_notif_cfg = {0};

                dpaa2_q->nb_desc = nb_tx_desc;

                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
                cong_notif_cfg.threshold_entry = nb_tx_desc;
                /* Notify that the queue is not congested when the data in
                 * the queue is below this threshold (90% of the entry value).
                 */
                cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
                cong_notif_cfg.message_ctx = 0;
                cong_notif_cfg.message_iova =
                                (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
                cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
                cong_notif_cfg.notification_mode =
                                         DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
                                         DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
                                         DPNI_CONG_OPT_COHERENT_WRITE;
                cong_notif_cfg.cg_point = DPNI_CP_QUEUE;

                ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
                                                       priv->token,
                                                       DPNI_QUEUE_TX,
                                                       ((channel_id << 8) | tc_id),
                                                       &cong_notif_cfg);
                if (ret) {
                        DPAA2_PMD_ERR(
                           "Error in setting tx congestion notification: "
                           "err=%d", ret);
                        return -ret;
                }
        }
        dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
        dev->data->tx_queues[tx_queue_id] = dpaa2_q;

        if (priv->flags & DPAA2_TX_CONF_ENABLE) {
                dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
                options = options | DPNI_QUEUE_OPT_USER_CTX;
                tx_conf_cfg.user_context = (size_t)(dpaa2_q);
                ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
                             DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
                             dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting the tx conf flow: "
                              "tc_index=%d, flow=%d err=%d",
                              dpaa2_tx_conf_q->tc_index,
                              dpaa2_tx_conf_q->flow_id, ret);
                        return -1;
                }

                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                             DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
                             dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
                if (ret) {
                        DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
                        return -1;
                }
                dpaa2_tx_conf_q->fqid = qid.fqid;
        }
        return 0;
}

static void
dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
        struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
        struct fsl_mc_io *dpni =
                (struct fsl_mc_io *)priv->eth_dev->process_private;
        uint8_t options = 0;
        int ret;
        struct dpni_queue cfg;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        PMD_INIT_FUNC_TRACE();

        total_nb_rx_desc -= dpaa2_q->nb_desc;

        if (dpaa2_q->cgid != 0xff) {
                options = DPNI_QUEUE_OPT_CLEAR_CGID;
                cfg.cgid = dpaa2_q->cgid;

                ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX,
                                     dpaa2_q->tc_index, dpaa2_q->flow_id,
                                     options, &cfg);
                if (ret)
                        DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
                                        dpaa2_q->fqid, ret);
                priv->cgid_in_use[dpaa2_q->cgid] = 0;
                dpaa2_q->cgid = 0xff;
        }
}

static uint32_t
dpaa2_dev_rx_queue_count(void *rx_queue)
{
        int32_t ret;
        struct dpaa2_queue *dpaa2_q;
        struct qbman_swp *swp;
        struct qbman_fq_query_np_rslt state;
        uint32_t frame_cnt = 0;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_PMD_ERR(
                                "Failed to allocate IO portal, tid: %d",
                                rte_gettid());
                        return -EINVAL;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        dpaa2_q = rx_queue;

        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
                DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
                                rx_queue, frame_cnt);
        }
        return frame_cnt;
}

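/*
 * Note: the ethdev rx_queue_count callback returns an unsigned count, so
 * the -EINVAL on the portal-affinity failure path above reaches the caller
 * as a very large uint32_t value rather than a negative errno.
 */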
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* todo: add more types */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
                dev->rx_pkt_burst == dpaa2_dev_rx ||
                dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
                return ptypes;
        return NULL;
}

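/*
 * Returning NULL for any other rx_pkt_burst handler tells the ethdev layer
 * that packet-type parsing is not guaranteed there; only the three Rx
 * paths checked above publish the ptype list.
 */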
/**
 * DPAA2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = param;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        int ret;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int status = 0, clear = 0;

        PMD_INIT_FUNC_TRACE();

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return;
        }

        ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, &status);
        if (unlikely(ret)) {
                DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
                clear = 0xffffffff;
                goto out;
        }

        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                clear = DPNI_IRQ_EVENT_LINK_CHANGED;
                dpaa2_dev_link_update(dev, 0);
                /* calling all the apps registered for link status event */
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
out:
        ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
                                    irq_index, clear);
        if (unlikely(ret))
                DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
        int err = 0;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        int irq_index = DPNI_IRQ_INDEX;
        unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

        PMD_INIT_FUNC_TRACE();

        err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
                                irq_index, mask);
        if (err < 0) {
                DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
                              strerror(-err));
                return err;
        }

        err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
                                  irq_index, enable);
        if (err < 0)
                DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
                              strerror(-err));

        return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
        struct rte_device *rdev = dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct rte_eth_dev_data *data = dev->data;
        struct dpaa2_dev_priv *priv = data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        struct dpni_queue cfg;
        struct dpni_error_cfg err_cfg;
        struct dpni_queue_id qid;
        struct dpaa2_queue *dpaa2_q;
        int ret, i;
        struct rte_intr_handle *intr_handle;

        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
        intr_handle = dpaa2_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();
        ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
                              priv->hw_id, ret);
                return ret;
        }

        /* Power up the phy. Needed to make the link go UP */
        dpaa2_dev_set_link_up(dev);

        for (i = 0; i < data->nb_rx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX, dpaa2_q->tc_index,
                                     dpaa2_q->flow_id, &cfg, &qid);
                if (ret) {
                        DPAA2_PMD_ERR("Error in getting flow information: "
                                      "err=%d", ret);
                        return ret;
                }
                dpaa2_q->fqid = qid.fqid;
        }

        if (dpaa2_enable_err_queue) {
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
                if (ret) {
                        DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
                                                ret);
                        return ret;
                }
                dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
                dpaa2_q->fqid = qid.fqid;
                dpaa2_q->eth_data = dev->data;

                err_cfg.errors = DPNI_ERROR_DISC;
                err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
        } else {
                /* checksum errors: send them to the normal path
                 * and flag them in the annotation
                 */
                err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

                /* packets with parse errors are not to be dropped */
                err_cfg.errors |= DPNI_ERROR_PHE;

                err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
        }
        err_cfg.set_frame_annotation = true;

        ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
                                       priv->token, &err_cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
                              ret);
                return ret;
        }

        /* if the interrupts were configured on this device */
        if (intr_handle && rte_intr_fd_get(intr_handle) &&
            dev->data->dev_conf.intr_conf.lsc != 0) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           dpaa2_interrupt_handler,
                                           (void *)dev);

                /* enable vfio intr/eventfd mapping
                 * Interrupt index 0 is required, so we can not use
                 * rte_intr_enable.
                 */
                rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

                /* enable dpni_irqs */
                dpaa2_eth_setup_irqs(dev, 1);
        }

        /* Change the tx burst function if ordered queues are used */
        if (priv->en_ordered)
                dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

        return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static int
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        int ret;
        struct rte_eth_link link;
        struct rte_device *rdev = dev->device;
        struct rte_intr_handle *intr_handle;
        struct rte_dpaa2_device *dpaa2_dev;

        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
        intr_handle = dpaa2_dev->intr_handle;

        PMD_INIT_FUNC_TRACE();

        /* reset interrupt callback */
        if (intr_handle && rte_intr_fd_get(intr_handle) &&
            dev->data->dev_conf.intr_conf.lsc != 0) {
                /* disable dpni irqs */
                dpaa2_eth_setup_irqs(dev, 0);

                /* disable vfio intr before callback unregister */
                rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

                /* Unregistering LSC interrupt handler */
                rte_intr_callback_unregister(intr_handle,
                                             dpaa2_interrupt_handler,
                                             (void *)dev);
        }

        dpaa2_dev_set_link_down(dev);

        ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
                              ret, priv->hw_id);
                return ret;
        }

        /* clear the recorded link status */
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(dev, &link);

        return 0;
}

1333 static int
1334 dpaa2_dev_close(struct rte_eth_dev *dev)
1335 {
1336         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1337         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1338         int i, ret;
1339         struct rte_eth_link link;
1340
1341         PMD_INIT_FUNC_TRACE();
1342
1343         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1344                 return 0;
1345
1346         if (!dpni) {
1347                 DPAA2_PMD_WARN("Already closed or not started");
1348                 return -1;
1349         }
1350
1351         dpaa2_tm_deinit(dev);
1352         dpaa2_flow_clean(dev);
1353         /* Clean the device first */
1354         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1355         if (ret) {
1356                 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1357                 return -1;
1358         }
1359
1360         memset(&link, 0, sizeof(link));
1361         rte_eth_linkstatus_set(dev, &link);
1362
1363         /* Free private queues memory */
1364         dpaa2_free_rx_tx_queues(dev);
1365         /* Close the device at underlying layer */
1366         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
1367         if (ret) {
1368                 DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
1369                               ret);
1370         }
1371
1372         /* Free the allocated memory for ethernet private data and dpni */
1373         priv->hw = NULL;
1374         dev->process_private = NULL;
1375         rte_free(dpni);
1376
1377         for (i = 0; i < MAX_TCS; i++)
1378                 rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
1379
1380         if (priv->extract.qos_extract_param)
1381                 rte_free((void *)(size_t)priv->extract.qos_extract_param);
1382
1383         DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
1384         return 0;
1385 }
1386
1387 static int
1388 dpaa2_dev_promiscuous_enable(
1389                 struct rte_eth_dev *dev)
1390 {
1391         int ret;
1392         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1393         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1394
1395         PMD_INIT_FUNC_TRACE();
1396
1397         if (dpni == NULL) {
1398                 DPAA2_PMD_ERR("dpni is NULL");
1399                 return -ENODEV;
1400         }
1401
1402         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1403         if (ret < 0)
1404                 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1405
1406         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1407         if (ret < 0)
1408                 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1409
1410         return ret;
1411 }
1412
1413 static int
1414 dpaa2_dev_promiscuous_disable(
1415                 struct rte_eth_dev *dev)
1416 {
1417         int ret;
1418         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1419         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1420
1421         PMD_INIT_FUNC_TRACE();
1422
1423         if (dpni == NULL) {
1424                 DPAA2_PMD_ERR("dpni is NULL");
1425                 return -ENODEV;
1426         }
1427
1428         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1429         if (ret < 0)
1430                 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1431
1432         if (dev->data->all_multicast == 0) {
1433                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1434                                                  priv->token, false);
1435                 if (ret < 0)
1436                         DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1437                                       ret);
1438         }
1439
1440         return ret;
1441 }
1442
1443 static int
1444 dpaa2_dev_allmulticast_enable(
1445                 struct rte_eth_dev *dev)
1446 {
1447         int ret;
1448         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1449         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1450
1451         PMD_INIT_FUNC_TRACE();
1452
1453         if (dpni == NULL) {
1454                 DPAA2_PMD_ERR("dpni is NULL");
1455                 return -ENODEV;
1456         }
1457
1458         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1459         if (ret < 0)
1460                 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1461
1462         return ret;
1463 }
1464
1465 static int
1466 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1467 {
1468         int ret;
1469         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1470         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1471
1472         PMD_INIT_FUNC_TRACE();
1473
1474         if (dpni == NULL) {
1475                 DPAA2_PMD_ERR("dpni is NULL");
1476                 return -ENODEV;
1477         }
1478
1479         /* must remain on for all promiscuous */
1480         if (dev->data->promiscuous == 1)
1481                 return 0;
1482
1483         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1484         if (ret < 0)
1485                 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1486
1487         return ret;
1488 }
1489
1490 static int
1491 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1492 {
1493         int ret;
1494         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1495         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1496         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1497                                 + VLAN_TAG_SIZE;
1498
1499         PMD_INIT_FUNC_TRACE();
1500
1501         if (dpni == NULL) {
1502                 DPAA2_PMD_ERR("dpni is NULL");
1503                 return -EINVAL;
1504         }
1505
1506         /* Set the Max Rx frame length as 'mtu' +
1507          * Maximum Ethernet header length
1508          */
1509         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1510                                         frame_size - RTE_ETHER_CRC_LEN);
1511         if (ret) {
1512                 DPAA2_PMD_ERR("Setting the max frame length failed");
1513                 return -1;
1514         }
1515         DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1516         return 0;
1517 }
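/*
 * Editor's worked example (a sketch, not part of the driver): for the
 * default MTU of 1500, frame_size = 1500 + RTE_ETHER_HDR_LEN (14) +
 * RTE_ETHER_CRC_LEN (4) + VLAN_TAG_SIZE (4) = 1522, so the MC is programmed
 * with a max frame length of 1522 - 4 (CRC) = 1518 bytes. A caller reaches
 * this handler through the generic ethdev API; port_id is assumed valid:
 *
 *     if (rte_eth_dev_set_mtu(port_id, 1500) < 0)
 *         printf("MTU update failed\n");
 */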
1518
1519 static int
1520 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1521                        struct rte_ether_addr *addr,
1522                        __rte_unused uint32_t index,
1523                        __rte_unused uint32_t pool)
1524 {
1525         int ret;
1526         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1527         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1528
1529         PMD_INIT_FUNC_TRACE();
1530
1531         if (dpni == NULL) {
1532                 DPAA2_PMD_ERR("dpni is NULL");
1533                 return -1;
1534         }
1535
1536         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1537                                 addr->addr_bytes, 0, 0, 0);
1538         if (ret)
1539                 DPAA2_PMD_ERR(
1540                         "error: Adding the MAC ADDR failed: err = %d", ret);
1541         return 0;
1542 }
1543
1544 static void
1545 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1546                           uint32_t index)
1547 {
1548         int ret;
1549         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1550         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1551         struct rte_eth_dev_data *data = dev->data;
1552         struct rte_ether_addr *macaddr;
1553
1554         PMD_INIT_FUNC_TRACE();
1555
1556         macaddr = &data->mac_addrs[index];
1557
1558         if (dpni == NULL) {
1559                 DPAA2_PMD_ERR("dpni is NULL");
1560                 return;
1561         }
1562
1563         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1564                                    priv->token, macaddr->addr_bytes);
1565         if (ret)
1566                 DPAA2_PMD_ERR(
1567                         "error: Removing the MAC ADDR failed: err = %d", ret);
1568 }
1569
1570 static int
1571 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1572                        struct rte_ether_addr *addr)
1573 {
1574         int ret;
1575         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1576         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1577
1578         PMD_INIT_FUNC_TRACE();
1579
1580         if (dpni == NULL) {
1581                 DPAA2_PMD_ERR("dpni is NULL");
1582                 return -EINVAL;
1583         }
1584
1585         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1586                                         priv->token, addr->addr_bytes);
1587
1588         if (ret)
1589                 DPAA2_PMD_ERR(
1590                         "error: Setting the MAC ADDR failed %d", ret);
1591
1592         return ret;
1593 }
1594
1595 static int
1596 dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1597                     struct rte_eth_stats *stats)
1598 {
1599         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1600         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1601         int32_t  retcode;
1602         uint8_t page0 = 0, page1 = 1, page2 = 2;
1603         union dpni_statistics value;
1604         int i;
1605         struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1606
1607         memset(&value, 0, sizeof(union dpni_statistics));
1608
1609         PMD_INIT_FUNC_TRACE();
1610
1611         if (!dpni) {
1612                 DPAA2_PMD_ERR("dpni is NULL");
1613                 return -EINVAL;
1614         }
1615
1616         if (!stats) {
1617                 DPAA2_PMD_ERR("stats is NULL");
1618                 return -EINVAL;
1619         }
1620
1621         /* Get Counters from page_0 */
1622         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1623                                       page0, 0, &value);
1624         if (retcode)
1625                 goto err;
1626
1627         stats->ipackets = value.page_0.ingress_all_frames;
1628         stats->ibytes = value.page_0.ingress_all_bytes;
1629
1630         /* Get Counters from page_1 */
1631         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1632                                       page1, 0, &value);
1633         if (retcode)
1634                 goto err;
1635
1636         stats->opackets = value.page_1.egress_all_frames;
1637         stats->obytes = value.page_1.egress_all_bytes;
1638
1639         /* Get Counters from page_2 */
1640         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1641                                       page2, 0, &value);
1642         if (retcode)
1643                 goto err;
1644
1645         /* Ingress drop frame count due to configured rules */
1646         stats->ierrors = value.page_2.ingress_filtered_frames;
1647         /* Ingress drop frame count due to error */
1648         stats->ierrors += value.page_2.ingress_discarded_frames;
1649
1650         stats->oerrors = value.page_2.egress_discarded_frames;
1651         stats->imissed = value.page_2.ingress_nobuffer_discards;
1652
1653         /* Fill in per queue stats */
1654         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1655                 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1656                 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1657                 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1658                 if (dpaa2_rxq)
1659                         stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1660                 if (dpaa2_txq)
1661                         stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1662
1663                 /* Byte counting is not implemented */
1664                 stats->q_ibytes[i]   = 0;
1665                 stats->q_obytes[i]   = 0;
1666         }
1667
1668         return 0;
1669
1670 err:
1671         DPAA2_PMD_ERR("Operation not completed: error code = %d", retcode);
1672         return retcode;
1673 }
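/*
 * Editor's sketch: the per-page MC counters gathered above surface through
 * the standard statistics API. Hypothetical caller-side code, error
 * handling elided:
 *
 *     struct rte_eth_stats st;
 *     if (rte_eth_stats_get(port_id, &st) == 0)
 *         printf("rx=%"PRIu64" tx=%"PRIu64"\n", st.ipackets, st.opackets);
 */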
1674
1675 static int
1676 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1677                      unsigned int n)
1678 {
1679         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1680         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1681         int32_t  retcode;
1682         union dpni_statistics value[5] = {};
1683         unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1684
1685         if (n < num)
1686                 return num;
1687
1688         if (xstats == NULL)
1689                 return 0;
1690
1691         /* Get Counters from page_0 */
1692         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1693                                       0, 0, &value[0]);
1694         if (retcode)
1695                 goto err;
1696
1697         /* Get Counters from page_1 */
1698         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1699                                       1, 0, &value[1]);
1700         if (retcode)
1701                 goto err;
1702
1703         /* Get Counters from page_2 */
1704         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1705                                       2, 0, &value[2]);
1706         if (retcode)
1707                 goto err;
1708
1709         for (i = 0; i < priv->max_cgs; i++) {
1710                 if (!priv->cgid_in_use[i]) {
1711                         /* Get Counters from page_4 */
1712                         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1713                                                       priv->token,
1714                                                       4, 0, &value[4]);
1715                         if (retcode)
1716                                 goto err;
1717                         break;
1718                 }
1719         }
1720
1721         for (i = 0; i < num; i++) {
1722                 xstats[i].id = i;
1723                 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1724                         raw.counter[dpaa2_xstats_strings[i].stats_id];
1725         }
1726         return i;
1727 err:
1728         DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1729         return retcode;
1730 }
1731
1732 static int
1733 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1734                        struct rte_eth_xstat_name *xstats_names,
1735                        unsigned int limit)
1736 {
1737         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1738
1739         if (limit < stat_cnt)
1740                 return stat_cnt;
1741
1742         if (xstats_names != NULL)
1743                 for (i = 0; i < stat_cnt; i++)
1744                         strlcpy(xstats_names[i].name,
1745                                 dpaa2_xstats_strings[i].name,
1746                                 sizeof(xstats_names[i].name));
1747
1748         return stat_cnt;
1749 }
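/*
 * Editor's sketch of the usual two-call pattern these xstats handlers serve:
 * callers size the array with a NULL first call, then fetch the values.
 * Hypothetical caller-side code, error handling elided:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *     rte_eth_xstats_get(port_id, xs, n);
 */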
1750
1751 static int
1752 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1753                        uint64_t *values, unsigned int n)
1754 {
1755         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1756         uint64_t values_copy[stat_cnt];
1757
1758         if (!ids) {
1759                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1760                 struct fsl_mc_io *dpni =
1761                         (struct fsl_mc_io *)dev->process_private;
1762                 int32_t  retcode;
1763                 union dpni_statistics value[5] = {};
1764
1765                 if (n < stat_cnt)
1766                         return stat_cnt;
1767
1768                 if (!values)
1769                         return 0;
1770
1771                 /* Get Counters from page_0 */
1772                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1773                                               0, 0, &value[0]);
1774                 if (retcode)
1775                         return 0;
1776
1777                 /* Get Counters from page_1 */
1778                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1779                                               1, 0, &value[1]);
1780                 if (retcode)
1781                         return 0;
1782
1783                 /* Get Counters from page_2 */
1784                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1785                                               2, 0, &value[2]);
1786                 if (retcode)
1787                         return 0;
1788
1789                 /* Get Counters from page_4 */
1790                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1791                                               4, 0, &value[4]);
1792                 if (retcode)
1793                         return 0;
1794
1795                 for (i = 0; i < stat_cnt; i++) {
1796                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1797                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1798                 }
1799                 return stat_cnt;
1800         }
1801
1802         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1803
1804         for (i = 0; i < n; i++) {
1805                 if (ids[i] >= stat_cnt) {
1806                         DPAA2_PMD_ERR("xstats id value isn't valid");
1807                         return -1;
1808                 }
1809                 values[i] = values_copy[ids[i]];
1810         }
1811         return n;
1812 }
1813
1814 static int
1815 dpaa2_xstats_get_names_by_id(
1816         struct rte_eth_dev *dev,
1817         const uint64_t *ids,
1818         struct rte_eth_xstat_name *xstats_names,
1819         unsigned int limit)
1820 {
1821         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1822         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1823
1824         if (!ids)
1825                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1826
1827         dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1828
1829         for (i = 0; i < limit; i++) {
1830                 if (ids[i] >= stat_cnt) {
1831                         DPAA2_PMD_ERR("xstats id value isn't valid");
1832                         return -1;
1833                 }
1834                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1835         }
1836         return limit;
1837 }
1838
1839 static int
1840 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1841 {
1842         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1843         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1844         int retcode;
1845         int i;
1846         struct dpaa2_queue *dpaa2_q;
1847
1848         PMD_INIT_FUNC_TRACE();
1849
1850         if (dpni == NULL) {
1851                 DPAA2_PMD_ERR("dpni is NULL");
1852                 return -EINVAL;
1853         }
1854
1855         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1856         if (retcode)
1857                 goto error;
1858
1859         /* Reset the per queue stats in dpaa2_queue structure */
1860         for (i = 0; i < priv->nb_rx_queues; i++) {
1861                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1862                 if (dpaa2_q)
1863                         dpaa2_q->rx_pkts = 0;
1864         }
1865
1866         for (i = 0; i < priv->nb_tx_queues; i++) {
1867                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1868                 if (dpaa2_q)
1869                         dpaa2_q->tx_pkts = 0;
1870         }
1871
1872         return 0;
1873
1874 error:
1875         DPAA2_PMD_ERR("Operation not completed: error code = %d", retcode);
1876         return retcode;
1877 }
1878
1879 /* return 0 means link status changed, -1 means not changed */
1880 static int
1881 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1882                       int wait_to_complete)
1883 {
1884         int ret;
1885         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1886         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1887         struct rte_eth_link link;
1888         struct dpni_link_state state = {0};
1889         uint8_t count;
1890
1891         if (dpni == NULL) {
1892                 DPAA2_PMD_ERR("dpni is NULL");
1893                 return 0;
1894         }
1895
1896         for (count = 0; count <= MAX_REPEAT_TIME; count++) {
1897                 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token,
1898                                           &state);
1899                 if (ret < 0) {
1900                         DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1901                         return -1;
1902                 }
1903                 if (state.up == RTE_ETH_LINK_DOWN &&
1904                     wait_to_complete)
1905                         rte_delay_ms(CHECK_INTERVAL);
1906                 else
1907                         break;
1908         }
1909
1910         memset(&link, 0, sizeof(struct rte_eth_link));
1911         link.link_status = state.up;
1912         link.link_speed = state.rate;
1913
1914         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1915                 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
1916         else
1917                 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
1918
1919         ret = rte_eth_linkstatus_set(dev, &link);
1920         if (ret == -1)
1921                 DPAA2_PMD_DEBUG("No change in status");
1922         else
1923                 DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1924                                link.link_status ? "Up" : "Down");
1925
1926         return ret;
1927 }
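/*
 * Editor's note: with wait_to_complete set, the loop above re-polls the MC
 * every CHECK_INTERVAL (100 ms) for up to MAX_REPEAT_TIME iterations, i.e.
 * roughly 9 seconds, before giving up on a down link. Caller-side sketch
 * (port_id assumed valid):
 *
 *     struct rte_eth_link link;
 *     rte_eth_link_get(port_id, &link);         // may block while polling
 *     rte_eth_link_get_nowait(port_id, &link);  // single non-blocking query
 */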
1928
1929 /**
1930  * Toggle the DPNI to enable, if not already enabled.
1931  * This is not strictly PHY up/down - it is more of logical toggling.
1932  */
1933 static int
1934 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1935 {
1936         int ret = -EINVAL;
1937         struct dpaa2_dev_priv *priv;
1938         struct fsl_mc_io *dpni;
1939         int en = 0;
1940         struct dpni_link_state state = {0};
1941
1942         priv = dev->data->dev_private;
1943         dpni = (struct fsl_mc_io *)dev->process_private;
1944
1945         if (dpni == NULL) {
1946                 DPAA2_PMD_ERR("dpni is NULL");
1947                 return ret;
1948         }
1949
1950         /* Check if DPNI is currently enabled */
1951         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1952         if (ret) {
1953                 /* Unable to obtain dpni status; Not continuing */
1954                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1955                 return -EINVAL;
1956         }
1957
1958         /* Enable link if not already enabled */
1959         if (!en) {
1960                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1961                 if (ret) {
1962                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1963                         return -EINVAL;
1964                 }
1965         }
1966         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1967         if (ret < 0) {
1968                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1969                 return -1;
1970         }
1971
1972         /* changing tx burst function to start enqueues */
1973         dev->tx_pkt_burst = dpaa2_dev_tx;
1974         dev->data->dev_link.link_status = state.up;
1975         dev->data->dev_link.link_speed = state.rate;
1976
1977         if (state.up)
1978                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1979         else
1980                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1981         return ret;
1982 }
1983
1984 /**
1985  * Toggle the DPNI to disable, if not already disabled.
1986  * This is not strictly PHY up/down - it is more of logical toggling.
1987  */
1988 static int
1989 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1990 {
1991         int ret = -EINVAL;
1992         struct dpaa2_dev_priv *priv;
1993         struct fsl_mc_io *dpni;
1994         int dpni_enabled = 0;
1995         int retries = 10;
1996
1997         PMD_INIT_FUNC_TRACE();
1998
1999         priv = dev->data->dev_private;
2000         dpni = (struct fsl_mc_io *)dev->process_private;
2001
2002         if (dpni == NULL) {
2003                 DPAA2_PMD_ERR("Device has not yet been configured");
2004                 return ret;
2005         }
2006
2007         /* changing tx burst function to avoid any more enqueues */
2008         dev->tx_pkt_burst = dummy_dev_tx;
2009
2010         /* Loop while dpni_disable() attempts to drain the egress FQs
2011          * and confirm them back to us.
2012          */
2013         do {
2014                 ret = dpni_disable(dpni, 0, priv->token);
2015                 if (ret) {
2016                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
2017                         return ret;
2018                 }
2019                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
2020                 if (ret) {
2021                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
2022                         return ret;
2023                 }
2024                 if (dpni_enabled)
2025                         /* Allow the MC some slack */
2026                         rte_delay_us(100 * 1000);
2027         } while (dpni_enabled && --retries);
2028
2029         if (!retries) {
2030                 DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
2031                 /* TODO: we may have to manually clean up queues.
2032                  */
2033         } else {
2034                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
2035                                dev->data->port_id);
2036         }
2037
2038         dev->data->dev_link.link_status = 0;
2039
2040         return ret;
2041 }
2042
2043 static int
2044 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2045 {
2046         int ret = -EINVAL;
2047         struct dpaa2_dev_priv *priv;
2048         struct fsl_mc_io *dpni;
2049         struct dpni_link_state state = {0};
2050
2051         PMD_INIT_FUNC_TRACE();
2052
2053         priv = dev->data->dev_private;
2054         dpni = (struct fsl_mc_io *)dev->process_private;
2055
2056         if (dpni == NULL || fc_conf == NULL) {
2057                 DPAA2_PMD_ERR("device not configured");
2058                 return ret;
2059         }
2060
2061         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2062         if (ret) {
2063                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
2064                 return ret;
2065         }
2066
2067         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
2068         if (state.options & DPNI_LINK_OPT_PAUSE) {
2069                 /* DPNI_LINK_OPT_PAUSE set
2070                  *  if ASYM_PAUSE not set,
2071                  *      RX Side flow control (handle received Pause frame)
2072                  *      TX side flow control (send Pause frame)
2073                  *  if ASYM_PAUSE set,
2074                  *      RX Side flow control (handle received Pause frame)
2075                  *      No TX side flow control (send Pause frame disabled)
2076                  */
2077                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
2078                         fc_conf->mode = RTE_ETH_FC_FULL;
2079                 else
2080                         fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
2081         } else {
2082                 /* DPNI_LINK_OPT_PAUSE not set
2083                  *  if ASYM_PAUSE set,
2084                  *      TX side flow control (send Pause frame)
2085                  *      No RX side flow control (No action on pause frame rx)
2086                  *  if ASYM_PAUSE not set,
2087                  *      Flow control disabled
2088                  */
2089                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
2090                         fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
2091                 else
2092                         fc_conf->mode = RTE_ETH_FC_NONE;
2093         }
2094
2095         return ret;
2096 }
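/*
 * Editor's summary of the option-to-mode mapping implemented above:
 *
 *   DPNI_LINK_OPT_PAUSE | DPNI_LINK_OPT_ASYM_PAUSE | fc_conf->mode
 *   --------------------+--------------------------+---------------------
 *   set                 | not set                  | RTE_ETH_FC_FULL
 *   set                 | set                      | RTE_ETH_FC_RX_PAUSE
 *   not set             | set                      | RTE_ETH_FC_TX_PAUSE
 *   not set             | not set                  | RTE_ETH_FC_NONE
 */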
2097
2098 static int
2099 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2100 {
2101         int ret = -EINVAL;
2102         struct dpaa2_dev_priv *priv;
2103         struct fsl_mc_io *dpni;
2104         struct dpni_link_state state = {0};
2105         struct dpni_link_cfg cfg = {0};
2106
2107         PMD_INIT_FUNC_TRACE();
2108
2109         priv = dev->data->dev_private;
2110         dpni = (struct fsl_mc_io *)dev->process_private;
2111
2112         if (dpni == NULL) {
2113                 DPAA2_PMD_ERR("dpni is NULL");
2114                 return ret;
2115         }
2116
2117         /* It is necessary to obtain the current link state before setting
2118          * fc_conf, as the MC returns an error if the rate, autoneg or
2119          * duplex values differ from the current ones.
2120          */
2121         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2122         if (ret) {
2123                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2124                 return -1;
2125         }
2126
2127         /* Disable link before setting configuration */
2128         dpaa2_dev_set_link_down(dev);
2129
2130         /* Based on fc_conf, update cfg */
2131         cfg.rate = state.rate;
2132         cfg.options = state.options;
2133
2134         /* update cfg with fc_conf */
2135         switch (fc_conf->mode) {
2136         case RTE_ETH_FC_FULL:
2137                 /* Full flow control;
2138                  * OPT_PAUSE set, ASYM_PAUSE not set
2139                  */
2140                 cfg.options |= DPNI_LINK_OPT_PAUSE;
2141                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2142                 break;
2143         case RTE_ETH_FC_TX_PAUSE:
2144                 /* Enable TX flow control only
2145                  * OPT_PAUSE not set;
2146                  * ASYM_PAUSE set;
2147                  */
2148                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2149                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2150                 break;
2151         case RTE_ETH_FC_RX_PAUSE:
2152                 /* Enable RX flow control only
2153                  * OPT_PAUSE set
2154                  * ASYM_PAUSE set
2155                  */
2156                 cfg.options |= DPNI_LINK_OPT_PAUSE;
2157                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2158                 break;
2159         case RTE_ETH_FC_NONE:
2160                 /* Disable Flow control
2161                  * OPT_PAUSE not set
2162                  * ASYM_PAUSE not set
2163                  */
2164                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2165                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2166                 break;
2167         default:
2168                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2169                               fc_conf->mode);
2170                 return -1;
2171         }
2172
2173         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2174         if (ret)
2175                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2176                               ret);
2177
2178         /* Enable link */
2179         dpaa2_dev_set_link_up(dev);
2180
2181         return ret;
2182 }
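/*
 * Editor's caller-side sketch (not part of the driver): request full
 * pause-frame flow control through the generic API, which lands in
 * dpaa2_flow_ctrl_set() above. Since the handler briefly toggles the link
 * down and up, this is best done before traffic starts.
 *
 *     struct rte_eth_fc_conf fc = { .mode = RTE_ETH_FC_FULL };
 *     if (rte_eth_dev_flow_ctrl_set(port_id, &fc) < 0)
 *         printf("flow control setup failed\n");
 */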
2183
2184 static int
2185 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2186                           struct rte_eth_rss_conf *rss_conf)
2187 {
2188         struct rte_eth_dev_data *data = dev->data;
2189         struct dpaa2_dev_priv *priv = data->dev_private;
2190         struct rte_eth_conf *eth_conf = &data->dev_conf;
2191         int ret, tc_index;
2192
2193         PMD_INIT_FUNC_TRACE();
2194
2195         if (rss_conf->rss_hf) {
2196                 for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2197                         ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2198                                 tc_index);
2199                         if (ret) {
2200                                 DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2201                                         tc_index);
2202                                 return ret;
2203                         }
2204                 }
2205         } else {
2206                 for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2207                         ret = dpaa2_remove_flow_dist(dev, tc_index);
2208                         if (ret) {
2209                                 DPAA2_PMD_ERR(
2210                                         "Unable to remove flow dist on tc%d",
2211                                         tc_index);
2212                                 return ret;
2213                         }
2214                 }
2215         }
2216         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2217         return 0;
2218 }
2219
2220 static int
2221 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2222                             struct rte_eth_rss_conf *rss_conf)
2223 {
2224         struct rte_eth_dev_data *data = dev->data;
2225         struct rte_eth_conf *eth_conf = &data->dev_conf;
2226
2227         /* dpaa2 does not support rss_key, so the length should be 0 */
2228         rss_conf->rss_key_len = 0;
2229         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2230         return 0;
2231 }
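/*
 * Editor's sketch: as the PMD keys RSS purely off rss_hf (no key support, as
 * noted above), enabling distribution by IP and UDP fields would look like:
 *
 *     struct rte_eth_rss_conf rss = {
 *         .rss_key = NULL,    // ignored by this PMD
 *         .rss_hf  = RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &rss);
 *
 * Passing rss_hf = 0 instead removes the flow distribution on every Rx TC.
 */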
2232
2233 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2234                 int eth_rx_queue_id,
2235                 struct dpaa2_dpcon_dev *dpcon,
2236                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2237 {
2238         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2239         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2240         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2241         uint8_t flow_id = dpaa2_ethq->flow_id;
2242         struct dpni_queue cfg;
2243         uint8_t options, priority;
2244         int ret;
2245
2246         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2247                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2248         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2249                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2250         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2251                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2252         else
2253                 return -EINVAL;
2254
2255         priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2256                    (dpcon->num_priorities - 1);
2257
2258         memset(&cfg, 0, sizeof(struct dpni_queue));
2259         options = DPNI_QUEUE_OPT_DEST;
2260         cfg.destination.type = DPNI_DEST_DPCON;
2261         cfg.destination.id = dpcon->dpcon_id;
2262         cfg.destination.priority = priority;
2263
2264         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2265                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2266                 cfg.destination.hold_active = 1;
2267         }
2268
2269         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2270                         !eth_priv->en_ordered) {
2271                 struct opr_cfg ocfg;
2272
2273                 /* Restoration window size = 256 frames */
2274                 ocfg.oprrws = 3;
2275                 /* Restoration window size = 512 frames for LX2 */
2276                 if (dpaa2_svr_family == SVR_LX2160A)
2277                         ocfg.oprrws = 4;
2278                 /* Auto advance NESN window enabled */
2279                 ocfg.oa = 1;
2280                 /* Late arrival window size disabled */
2281                 ocfg.olws = 0;
2282                 /* ORL resource exhaustion advance NESN disabled */
2283                 ocfg.oeane = 0;
2284                 /* Loose ordering enabled */
2285                 ocfg.oloe = 1;
2286                 eth_priv->en_loose_ordered = 1;
2287                 /* Strict ordering enabled if explicitly set */
2288                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2289                         ocfg.oloe = 0;
2290                         eth_priv->en_loose_ordered = 0;
2291                 }
2292
2293                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2294                                    dpaa2_ethq->tc_index, flow_id,
2295                                    OPR_OPT_CREATE, &ocfg, 0);
2296                 if (ret) {
2297                         DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
2298                         return ret;
2299                 }
2300
2301                 eth_priv->en_ordered = 1;
2302         }
2303
2304         options |= DPNI_QUEUE_OPT_USER_CTX;
2305         cfg.user_context = (size_t)(dpaa2_ethq);
2306
2307         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2308                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
2309         if (ret) {
2310                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2311                 return ret;
2312         }
2313
2314         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2315
2316         return 0;
2317 }
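/*
 * Editor's sketch of how this attach path is normally reached: through the
 * event ethernet Rx adapter rather than a direct call. adapter_id,
 * eth_port_id, rx_queue_id and ev_qid are assumed to be already configured.
 *
 *     struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *         .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *         .ev.priority   = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *         .ev.queue_id   = ev_qid,
 *     };
 *     rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id,
 *                                        rx_queue_id, &qconf);
 *
 * The atomic schedule type makes the code above set
 * DPNI_QUEUE_OPT_HOLD_ACTIVE so that per-flow ordering is preserved.
 */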
2318
2319 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2320                 int eth_rx_queue_id)
2321 {
2322         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2323         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2324         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2325         uint8_t flow_id = dpaa2_ethq->flow_id;
2326         struct dpni_queue cfg;
2327         uint8_t options;
2328         int ret;
2329
2330         memset(&cfg, 0, sizeof(struct dpni_queue));
2331         options = DPNI_QUEUE_OPT_DEST;
2332         cfg.destination.type = DPNI_DEST_NONE;
2333
2334         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2335                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
2336         if (ret)
2337                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2338
2339         return ret;
2340 }
2341
2342 static int
2343 dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
2344                        const struct rte_flow_ops **ops)
2345 {
2346         if (!dev)
2347                 return -ENODEV;
2348
2349         *ops = &dpaa2_flow_ops;
2350         return 0;
2351 }
2352
2353 static void
2354 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2355         struct rte_eth_rxq_info *qinfo)
2356 {
2357         struct dpaa2_queue *rxq;
2358         struct dpaa2_dev_priv *priv = dev->data->dev_private;
2359         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2360         uint16_t max_frame_length;
2361
2362         rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2363
2364         qinfo->mp = rxq->mb_pool;
2365         qinfo->scattered_rx = dev->data->scattered_rx;
2366         qinfo->nb_desc = rxq->nb_desc;
2367         if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
2368                                 &max_frame_length) == 0)
2369                 qinfo->rx_buf_size = max_frame_length;
2370
2371         qinfo->conf.rx_free_thresh = 1;
2372         qinfo->conf.rx_drop_en = 1;
2373         qinfo->conf.rx_deferred_start = 0;
2374         qinfo->conf.offloads = rxq->offloads;
2375 }
2376
2377 static void
2378 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2379         struct rte_eth_txq_info *qinfo)
2380 {
2381         struct dpaa2_queue *txq;
2382
2383         txq = dev->data->tx_queues[queue_id];
2384
2385         qinfo->nb_desc = txq->nb_desc;
2386         qinfo->conf.tx_thresh.pthresh = 0;
2387         qinfo->conf.tx_thresh.hthresh = 0;
2388         qinfo->conf.tx_thresh.wthresh = 0;
2389
2390         qinfo->conf.tx_free_thresh = 0;
2391         qinfo->conf.tx_rs_thresh = 0;
2392         qinfo->conf.offloads = txq->offloads;
2393         qinfo->conf.tx_deferred_start = 0;
2394 }
2395
2396 static int
2397 dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2398 {
2399         *(const void **)ops = &dpaa2_tm_ops;
2400
2401         return 0;
2402 }
2403
2404 void
2405 rte_pmd_dpaa2_thread_init(void)
2406 {
2407         int ret;
2408
2409         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
2410                 ret = dpaa2_affine_qbman_swp();
2411                 if (ret) {
2412                         DPAA2_PMD_ERR(
2413                                 "Failed to allocate IO portal, tid: %d",
2414                                 rte_gettid());
2415                         return;
2416                 }
2417         }
2418 }
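/*
 * Editor's usage note: rte_pmd_dpaa2_thread_init() is meant for threads not
 * created by the EAL (and hence without an affined QBMAN portal) that still
 * call the Rx/Tx burst APIs. A hedged sketch; port_id is an assumption:
 *
 *     static void *worker(void *arg)
 *     {
 *         uint16_t port_id = *(uint16_t *)arg;
 *         struct rte_mbuf *pkts[32];
 *
 *         rte_pmd_dpaa2_thread_init();    // affine a DPIO portal first
 *         for (;;) {
 *             uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *             ... process and free nb packets ...
 *         }
 *         return NULL;
 *     }
 */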
2419
2420 static struct eth_dev_ops dpaa2_ethdev_ops = {
2421         .dev_configure    = dpaa2_eth_dev_configure,
2422         .dev_start            = dpaa2_dev_start,
2423         .dev_stop             = dpaa2_dev_stop,
2424         .dev_close            = dpaa2_dev_close,
2425         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2426         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2427         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2428         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2429         .dev_set_link_up      = dpaa2_dev_set_link_up,
2430         .dev_set_link_down    = dpaa2_dev_set_link_down,
2431         .link_update       = dpaa2_dev_link_update,
2432         .stats_get             = dpaa2_dev_stats_get,
2433         .xstats_get            = dpaa2_dev_xstats_get,
2434         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
2435         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2436         .xstats_get_names      = dpaa2_xstats_get_names,
2437         .stats_reset       = dpaa2_dev_stats_reset,
2438         .xstats_reset         = dpaa2_dev_stats_reset,
2439         .fw_version_get    = dpaa2_fw_version_get,
2440         .dev_infos_get     = dpaa2_dev_info_get,
2441         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2442         .mtu_set           = dpaa2_dev_mtu_set,
2443         .vlan_filter_set      = dpaa2_vlan_filter_set,
2444         .vlan_offload_set     = dpaa2_vlan_offload_set,
2445         .vlan_tpid_set        = dpaa2_vlan_tpid_set,
2446         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2447         .rx_queue_release  = dpaa2_dev_rx_queue_release,
2448         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2449         .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2450         .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2451         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
2452         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
2453         .mac_addr_add         = dpaa2_dev_add_mac_addr,
2454         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2455         .mac_addr_set         = dpaa2_dev_set_mac_addr,
2456         .rss_hash_update      = dpaa2_dev_rss_hash_update,
2457         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2458         .flow_ops_get         = dpaa2_dev_flow_ops_get,
2459         .rxq_info_get         = dpaa2_rxq_info_get,
2460         .txq_info_get         = dpaa2_txq_info_get,
2461         .tm_ops_get           = dpaa2_tm_ops_get,
2462 #if defined(RTE_LIBRTE_IEEE1588)
2463         .timesync_enable      = dpaa2_timesync_enable,
2464         .timesync_disable     = dpaa2_timesync_disable,
2465         .timesync_read_time   = dpaa2_timesync_read_time,
2466         .timesync_write_time  = dpaa2_timesync_write_time,
2467         .timesync_adjust_time = dpaa2_timesync_adjust_time,
2468         .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2469         .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2470 #endif
2471 };
2472
2473 /* Populate the MAC address from the one physically available (u-boot/firmware)
2474  * and/or the one set by higher layers like MC (restool) etc.
2475  * The resolved primary MAC address is written to mac_entry.
2476  */
2477 static int
2478 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2479                   struct rte_ether_addr *mac_entry)
2480 {
2481         int ret;
2482         struct rte_ether_addr phy_mac, prime_mac;
2483
2484         memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2485         memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2486
2487         /* Get the physical device MAC address */
2488         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2489                                      phy_mac.addr_bytes);
2490         if (ret) {
2491                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2492                 goto cleanup;
2493         }
2494
2495         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2496                                         prime_mac.addr_bytes);
2497         if (ret) {
2498                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2499                 goto cleanup;
2500         }
2501
2502         /* Now that both MACs have been obtained:
2503          *  if phy MAC is non-empty and phy != prime, overwrite prime
2504          *     with phy and return phy;
2505          *  if phy MAC is empty, return prime;
2506          *  if both are empty, create a random MAC, set it as prime and return it.
2507          */
2508         if (!rte_is_zero_ether_addr(&phy_mac)) {
2509                 /* If the addresses are not same, overwrite prime */
2510                 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2511                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2512                                                         priv->token,
2513                                                         phy_mac.addr_bytes);
2514                         if (ret) {
2515                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2516                                               ret);
2517                                 goto cleanup;
2518                         }
2519                         memcpy(&prime_mac, &phy_mac,
2520                                 sizeof(struct rte_ether_addr));
2521                 }
2522         } else if (rte_is_zero_ether_addr(&prime_mac)) {
2523                 /* In case phys and prime, both are zero, create random MAC */
2524                 rte_eth_random_addr(prime_mac.addr_bytes);
2525                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2526                                                 priv->token,
2527                                                 prime_mac.addr_bytes);
2528                 if (ret) {
2529                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2530                         goto cleanup;
2531                 }
2532         }
2533
2534         /* prime_mac holds the final MAC address */
2535         memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2536         return 0;
2537
2538 cleanup:
2539         return -1;
2540 }
2541
2542 static int
2543 check_devargs_handler(__rte_unused const char *key, const char *value,
2544                       __rte_unused void *opaque)
2545 {
2546         if (strcmp(value, "1"))
2547                 return -1;
2548
2549         return 0;
2550 }
2551
2552 static int
2553 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2554 {
2555         struct rte_kvargs *kvlist;
2556
2557         if (!devargs)
2558                 return 0;
2559
2560         kvlist = rte_kvargs_parse(devargs->args, NULL);
2561         if (!kvlist)
2562                 return 0;
2563
2564         if (!rte_kvargs_count(kvlist, key)) {
2565                 rte_kvargs_free(kvlist);
2566                 return 0;
2567         }
2568
2569         if (rte_kvargs_process(kvlist, key,
2570                                check_devargs_handler, NULL) < 0) {
2571                 rte_kvargs_free(kvlist);
2572                 return 0;
2573         }
2574         rte_kvargs_free(kvlist);
2575
2576         return 1;
2577 }
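/*
 * Editor's example: each drv_* key probed via dpaa2_get_devargs() is passed
 * as a device argument on the EAL command line and only takes effect when
 * set to "1", e.g. (testpmd invocations shown as an illustration):
 *
 *     dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 *     dpdk-testpmd -a fslmc:dpni.1,drv_err_queue=1 -- -i
 */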
2578
2579 static int
2580 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2581 {
2582         struct rte_device *dev = eth_dev->device;
2583         struct rte_dpaa2_device *dpaa2_dev;
2584         struct fsl_mc_io *dpni_dev;
2585         struct dpni_attr attr;
2586         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2587         struct dpni_buffer_layout layout;
2588         int ret, hw_id, i;
2589
2590         PMD_INIT_FUNC_TRACE();
2591
2592         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2593         if (!dpni_dev) {
2594                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2595                 return -1;
2596         }
2597         dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2598         eth_dev->process_private = (void *)dpni_dev;
2599
2600         /* For secondary processes, the primary has done all the work */
2601         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2602                 /* In case of secondary, only burst and ops API need to be
2603                  * plugged.
2604                  */
2605                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2606                 eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2607                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2608                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2609                 else if (dpaa2_get_devargs(dev->devargs,
2610                                         DRIVER_NO_PREFETCH_MODE))
2611                         eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2612                 else
2613                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2614                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2615                 return 0;
2616         }
2617
2618         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2619
2620         hw_id = dpaa2_dev->object_id;
2621         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2622         if (ret) {
2623                 DPAA2_PMD_ERR(
2624                              "Failure in opening dpni@%d with err code %d",
2625                              hw_id, ret);
2626                 rte_free(dpni_dev);
2627                 return -1;
2628         }
2629
2630         if (eth_dev->data->dev_conf.lpbk_mode)
2631                 dpaa2_dev_recycle_deconfig(eth_dev);
2632
2633         /* Clean the device first */
2634         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2635         if (ret) {
2636                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2637                               hw_id, ret);
2638                 goto init_err;
2639         }
2640
2641         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2642         if (ret) {
2643                 DPAA2_PMD_ERR(
2644                              "Failure in get dpni@%d attribute, err code %d",
2645                              hw_id, ret);
2646                 goto init_err;
2647         }
2648
2649         priv->num_rx_tc = attr.num_rx_tcs;
2650         priv->num_tx_tc = attr.num_tx_tcs;
2651         priv->qos_entries = attr.qos_entries;
2652         priv->fs_entries = attr.fs_entries;
2653         priv->dist_queues = attr.num_queues;
2654         priv->num_channels = attr.num_channels;
2655         priv->channel_inuse = 0;
2656         rte_spinlock_init(&priv->lpbk_qp_lock);
2657
2658         /* only if the custom CG is enabled */
2659         if (attr.options & DPNI_OPT_CUSTOM_CG)
2660                 priv->max_cgs = attr.num_cgs;
2661         else
2662                 priv->max_cgs = 0;
2663
2664         for (i = 0; i < priv->max_cgs; i++)
2665                 priv->cgid_in_use[i] = 0;
2666
2667         for (i = 0; i < attr.num_rx_tcs; i++)
2668                 priv->nb_rx_queues += attr.num_queues;
2669
2670         priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
2671
2672         DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2673                         priv->num_rx_tc, priv->nb_rx_queues,
2674                         priv->nb_tx_queues, priv->max_cgs);
2675
2676         priv->hw = dpni_dev;
2677         priv->hw_id = hw_id;
2678         priv->options = attr.options;
2679         priv->max_mac_filters = attr.mac_filter_entries;
2680         priv->max_vlan_filters = attr.vlan_filter_entries;
2681         priv->flags = 0;
2682 #if defined(RTE_LIBRTE_IEEE1588)
2683         DPAA2_PMD_INFO("DPDK IEEE1588 is enabled");
2684         priv->flags |= DPAA2_TX_CONF_ENABLE;
2685 #endif
2686         /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
2687         if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
2688                 priv->flags |= DPAA2_TX_CONF_ENABLE;
2689                 DPAA2_PMD_INFO("TX_CONF Enabled");
2690         }
2691
2692         if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
2693                 dpaa2_enable_err_queue = 1;
2694                 DPAA2_PMD_INFO("Enable error queue");
2695         }
2696
2697         /* Allocate memory for hardware structure for queues */
2698         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2699         if (ret) {
2700                 DPAA2_PMD_ERR("Queue allocation Failed");
2701                 goto init_err;
2702         }
2703
2704         /* Allocate memory for storing MAC addresses.
2705          * Table of mac_filter_entries size is allocated so that RTE ether lib
2706          * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2707          */
2708         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2709                 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2710         if (eth_dev->data->mac_addrs == NULL) {
2711                 DPAA2_PMD_ERR(
2712                    "Failed to allocate %d bytes needed to store MAC addresses",
2713                    RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2714                 ret = -ENOMEM;
2715                 goto init_err;
2716         }
2717
2718         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2719         if (ret) {
2720                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2721                 rte_free(eth_dev->data->mac_addrs);
2722                 eth_dev->data->mac_addrs = NULL;
2723                 goto init_err;
2724         }
2725
2726         /* ... tx buffer layout ... */
2727         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2728         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2729                 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2730                                  DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2731                 layout.pass_timestamp = true;
2732         } else {
2733                 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2734         }
2735         layout.pass_frame_status = 1;
2736         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2737                                      DPNI_QUEUE_TX, &layout);
2738         if (ret) {
2739                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2740                 goto init_err;
2741         }
2742
2743         /* ... tx-conf and error buffer layout ... */
2744         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2745         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2746                 layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2747                 layout.pass_timestamp = true;
2748         }
2749         layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2750         layout.pass_frame_status = 1;
2751         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2752                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2753         if (ret) {
2754                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2755                              ret);
2756                 goto init_err;
2757         }
2758
2759         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2760
        if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
                eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
                DPAA2_PMD_INFO("Loopback mode");
        } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
                eth_dev->rx_pkt_burst = dpaa2_dev_rx;
                DPAA2_PMD_INFO("No Prefetch mode");
        } else {
                eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        }
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;

        /* Init fields w.r.t. classification */
        memset(&priv->extract.qos_key_extract, 0,
                sizeof(struct dpaa2_key_extract));
        priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
        if (!priv->extract.qos_extract_param) {
                DPAA2_PMD_ERR("Memory allocation failed for flow classification");
                ret = -ENOMEM;
                goto init_err;
        }
        priv->extract.qos_key_extract.key_info.ipv4_src_offset =
                IP_ADDRESS_OFFSET_INVALID;
        priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
                IP_ADDRESS_OFFSET_INVALID;
        priv->extract.qos_key_extract.key_info.ipv6_src_offset =
                IP_ADDRESS_OFFSET_INVALID;
        priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
                IP_ADDRESS_OFFSET_INVALID;

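        /* Per-TC key-extract state. The 256-byte, 64-byte-aligned buffers
         * allocated below hold the extract parameters used by the
         * flow-classification code (not shown in this listing).
         */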
        for (i = 0; i < MAX_TCS; i++) {
                memset(&priv->extract.tc_key_extract[i], 0,
                        sizeof(struct dpaa2_key_extract));
                priv->extract.tc_extract_param[i] =
                        (size_t)rte_malloc(NULL, 256, 64);
                if (!priv->extract.tc_extract_param[i]) {
                        DPAA2_PMD_ERR("Memory allocation failed for flow classification");
                        ret = -ENOMEM;
                        goto init_err;
                }
                priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
                        IP_ADDRESS_OFFSET_INVALID;
                priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
                        IP_ADDRESS_OFFSET_INVALID;
                priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
                        IP_ADDRESS_OFFSET_INVALID;
                priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
                        IP_ADDRESS_OFFSET_INVALID;
        }

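        /* Default max frame length: RTE_ETHER_MAX_LEN (1518) minus the
         * 4-byte CRC plus 4 bytes for one VLAN tag, i.e. 1518 bytes.
         */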
        ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
                                        RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
                                        + VLAN_TAG_SIZE);
        if (ret) {
                DPAA2_PMD_ERR("Unable to set MTU, check config");
                goto init_err;
        }

        /* TODO: To enable soft parser support, the DPAA2 driver needs to
         * integrate with an external entity that supplies the byte code for
         * the software sequence, which is then offloaded to the hardware
         * through the MC interface. For now it is assumed that the driver
         * has obtained the byte code by some means before offloading it.
         */
        if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
                WRIOP_SS_INITIALIZER(priv);
                ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
                if (ret < 0) {
                        DPAA2_PMD_ERR("Error (%d) in loading softparser",
                                      ret);
                        goto init_err;
                }

                ret = dpaa2_eth_enable_wriop_soft_parser(priv,
                                                         DPNI_SS_INGRESS);
                if (ret < 0) {
                        DPAA2_PMD_ERR("Error (%d) in enabling softparser",
                                      ret);
                        goto init_err;
                }
        }
        DPAA2_PMD_INFO("%s: netdev created, connected to %s",
                eth_dev->data->name, dpaa2_dev->ep_name);

        return 0;
init_err:
        dpaa2_dev_close(eth_dev);

        return ret;
}

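/* Check whether the given ethdev is backed by this (DPAA2) PMD. */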
int
dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev)
{
        return dev->device->driver == &rte_dpaa2_pmd.driver;
}

static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
                struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;
        struct dpaa2_dev_priv *dev_priv;
        int diag;

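        /* DPAA2 hardware places annotation data (HW annotation plus the
         * pass-through annotation) ahead of each frame; this must fit
         * within the mbuf headroom configured at build time.
         */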
        if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
                RTE_PKTMBUF_HEADROOM) {
                DPAA2_PMD_ERR(
                "RTE_PKTMBUF_HEADROOM(%d) must be at least the DPAA2 annotation requirement(%d)",
                RTE_PKTMBUF_HEADROOM,
                DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);

                return -1;
        }

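        /* In the primary process, allocate the ethdev and its private data;
         * secondary processes attach to the port created by the primary.
         */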
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
                if (!eth_dev)
                        return -ENODEV;
                dev_priv = rte_zmalloc("ethdev private structure",
                                       sizeof(struct dpaa2_dev_priv),
                                       RTE_CACHE_LINE_SIZE);
                if (dev_priv == NULL) {
                        DPAA2_PMD_CRIT(
                                "Unable to allocate memory for private data");
                        rte_eth_dev_release_port(eth_dev);
                        return -ENOMEM;
                }
                eth_dev->data->dev_private = (void *)dev_priv;
                /* Store a pointer to eth_dev in dev_private */
                dev_priv->eth_dev = eth_dev;
        } else {
                eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
                if (!eth_dev) {
                        DPAA2_PMD_DEBUG("Secondary process attach failed, returning -ENODEV");
                        return -ENODEV;
                }
        }

        eth_dev->device = &dpaa2_dev->device;

        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0) {
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        rte_eth_dev_release_port(eth_dev);
        return diag;
}

static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;
        int ret;

        eth_dev = dpaa2_dev->eth_dev;
        dpaa2_dev_close(eth_dev);
        ret = rte_eth_dev_release_port(eth_dev);

        return ret;
}

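/* Driver descriptor registered with the fslmc bus: link-status interrupts
 * are supported and IOVA is used as VA.
 */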
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_ETH,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
                DRIVER_LOOPBACK_MODE "=<int> "
                DRIVER_NO_PREFETCH_MODE "=<int> "
                DRIVER_TX_CONF "=<int> "
                DRIVER_ERROR_QUEUE "=<int>");
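/* Example devargs usage (illustrative only; the dpni object name depends on
 * the DPRC container in use):
 *
 *   dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 */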
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);