[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
18 #include <rte_dev.h>
19 #include <rte_fslmc.h>
20 #include <rte_flow_driver.h>
21
22 #include "dpaa2_pmd_logs.h"
23 #include <fslmc_vfio.h>
24 #include <dpaa2_hw_pvt.h>
25 #include <dpaa2_hw_mempool.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <mc/fsl_dpmng.h>
28 #include "dpaa2_ethdev.h"
29 #include "dpaa2_sparser.h"
30 #include <fsl_qbman_debug.h>
31
32 #define DRIVER_LOOPBACK_MODE "drv_loopback"
33 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
34 #define DRIVER_TX_CONF "drv_tx_conf"
35 #define DRIVER_ERROR_QUEUE  "drv_err_queue"
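/* Illustrative usage (the option syntax follows the fslmc bus devargs
 * convention as I understand it; treat the exact strings as an assumption,
 * not something stated in this file). The driver modes above are selected
 * per device on the EAL command line, e.g.:
 *
 *   dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1 -- -i
 *   dpdk-testpmd -a fslmc:dpni.1,drv_err_queue=1 -- -i
 */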
36 #define CHECK_INTERVAL         100  /* 100ms */
37 #define MAX_REPEAT_TIME        90   /* 9s (90 * 100ms) in total */
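/* A minimal sketch (example only, not part of this driver) of how a
 * wait-to-complete link poll can use the two constants above: query
 * dpni_get_link_state() every CHECK_INTERVAL ms, for at most
 * MAX_REPEAT_TIME iterations (9s in total).
 */
#if 0	/* illustrative sketch */
static int
example_wait_link_up(struct fsl_mc_io *dpni, uint16_t token)
{
	struct dpni_link_state state = {0};
	int i, ret;

	for (i = 0; i < MAX_REPEAT_TIME; i++) {
		ret = dpni_get_link_state(dpni, CMD_PRI_LOW, token, &state);
		if (ret < 0)
			return ret;
		if (state.up)
			return 0;
		rte_delay_ms(CHECK_INTERVAL);
	}
	return -ETIMEDOUT;
}
#endif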
38
39 /* Supported Rx offloads */
40 static uint64_t dev_rx_offloads_sup =
41                 DEV_RX_OFFLOAD_CHECKSUM |
42                 DEV_RX_OFFLOAD_SCTP_CKSUM |
43                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
44                 DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
45                 DEV_RX_OFFLOAD_VLAN_STRIP |
46                 DEV_RX_OFFLOAD_VLAN_FILTER |
47                 DEV_RX_OFFLOAD_JUMBO_FRAME |
48                 DEV_RX_OFFLOAD_TIMESTAMP;
49
50 /* Rx offloads which cannot be disabled */
51 static uint64_t dev_rx_offloads_nodis =
52                 DEV_RX_OFFLOAD_RSS_HASH |
53                 DEV_RX_OFFLOAD_SCATTER;
54
55 /* Supported Tx offloads */
56 static uint64_t dev_tx_offloads_sup =
57                 DEV_TX_OFFLOAD_VLAN_INSERT |
58                 DEV_TX_OFFLOAD_IPV4_CKSUM |
59                 DEV_TX_OFFLOAD_UDP_CKSUM |
60                 DEV_TX_OFFLOAD_TCP_CKSUM |
61                 DEV_TX_OFFLOAD_SCTP_CKSUM |
62                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
63                 DEV_TX_OFFLOAD_MT_LOCKFREE |
64                 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
65
66 /* Tx offloads which cannot be disabled */
67 static uint64_t dev_tx_offloads_nodis =
68                 DEV_TX_OFFLOAD_MULTI_SEGS;
69
70 /* enable timestamp in mbuf */
71 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
72 uint64_t dpaa2_timestamp_rx_dynflag;
73 int dpaa2_timestamp_dynfield_offset = -1;
74
75 /* Enable error queue */
76 bool dpaa2_enable_err_queue;
77
78 struct rte_dpaa2_xstats_name_off {
79         char name[RTE_ETH_XSTATS_NAME_SIZE];
80         uint8_t page_id; /* dpni statistics page id */
81         uint8_t stats_id; /* stats id in the given page */
82 };
83
84 static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
85         {"ingress_multicast_frames", 0, 2},
86         {"ingress_multicast_bytes", 0, 3},
87         {"ingress_broadcast_frames", 0, 4},
88         {"ingress_broadcast_bytes", 0, 5},
89         {"egress_multicast_frames", 1, 2},
90         {"egress_multicast_bytes", 1, 3},
91         {"egress_broadcast_frames", 1, 4},
92         {"egress_broadcast_bytes", 1, 5},
93         {"ingress_filtered_frames", 2, 0},
94         {"ingress_discarded_frames", 2, 1},
95         {"ingress_nobuffer_discards", 2, 2},
96         {"egress_discarded_frames", 2, 3},
97         {"egress_confirmed_frames", 2, 4},
98         {"cgr_reject_frames", 4, 0},
99         {"cgr_reject_bytes", 4, 1},
100 };
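/* A minimal sketch (assumption: dpni_get_statistics() fills a raw per-page
 * counter array, as the driver's xstats path uses it) showing how a
 * page_id/stats_id pair from the table above selects one counter:
 */
#if 0	/* illustrative sketch */
static uint64_t
example_read_xstat(struct fsl_mc_io *dpni, uint16_t token, unsigned int n)
{
	union dpni_statistics value = {0};

	if (dpni_get_statistics(dpni, CMD_PRI_LOW, token,
				dpaa2_xstats_strings[n].page_id, 0, &value))
		return 0;
	return value.raw.counter[dpaa2_xstats_strings[n].stats_id];
}
#endif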
101
102 static struct rte_dpaa2_driver rte_dpaa2_pmd;
103 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
104                                  int wait_to_complete);
105 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
106 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
107 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
108
109 static int
110 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
111 {
112         int ret;
113         struct dpaa2_dev_priv *priv = dev->data->dev_private;
114         struct fsl_mc_io *dpni = dev->process_private;
115
116         PMD_INIT_FUNC_TRACE();
117
118         if (dpni == NULL) {
119                 DPAA2_PMD_ERR("dpni is NULL");
120                 return -1;
121         }
122
123         if (on)
124                 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
125                                        vlan_id, 0, 0, 0);
126         else
127                 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
128                                           priv->token, vlan_id);
129
130         if (ret < 0)
131                 DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
132                               ret, vlan_id, priv->hw_id);
133
134         return ret;
135 }
136
137 static int
138 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
139 {
140         struct dpaa2_dev_priv *priv = dev->data->dev_private;
141         struct fsl_mc_io *dpni = dev->process_private;
142         int ret = 0;
143
144         PMD_INIT_FUNC_TRACE();
145
146         if (mask & ETH_VLAN_FILTER_MASK) {
147         /* VLAN filter not available */
148                 if (!priv->max_vlan_filters) {
149                         DPAA2_PMD_INFO("VLAN filter not available");
150                         return -ENOTSUP;
151                 }
152
153                 if (dev->data->dev_conf.rxmode.offloads &
154                         DEV_RX_OFFLOAD_VLAN_FILTER)
155                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
156                                                       priv->token, true);
157                 else
158                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
159                                                       priv->token, false);
160                 if (ret < 0)
161                         DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
162         }
163
164         return ret;
165 }
166
167 static int
168 dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
169                       enum rte_vlan_type vlan_type __rte_unused,
170                       uint16_t tpid)
171 {
172         struct dpaa2_dev_priv *priv = dev->data->dev_private;
173         struct fsl_mc_io *dpni = dev->process_private;
174         int ret = -ENOTSUP;
175
176         PMD_INIT_FUNC_TRACE();
177
178         /* nothing to be done for standard vlan tpids */
179         if (tpid == 0x8100 || tpid == 0x88A8)
180                 return 0;
181
182         ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
183                                    priv->token, tpid);
184         if (ret < 0)
185                 DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
186         /* if TPIDs are already configured, remove one first and retry */
187         if (ret == -EBUSY) {
188                 struct dpni_custom_tpid_cfg tpid_list = {0};
189
190                 ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
191                                    priv->token, &tpid_list);
192                 if (ret < 0)
193                         goto fail;
194                 ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
195                                    priv->token, tpid_list.tpid1);
196                 if (ret < 0)
197                         goto fail;
198                 ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
199                                            priv->token, tpid);
200         }
201 fail:
202         return ret;
203 }
204
205 static int
206 dpaa2_fw_version_get(struct rte_eth_dev *dev,
207                      char *fw_version,
208                      size_t fw_size)
209 {
210         int ret;
211         struct fsl_mc_io *dpni = dev->process_private;
212         struct mc_soc_version mc_plat_info = {0};
213         struct mc_version mc_ver_info = {0};
214
215         PMD_INIT_FUNC_TRACE();
216
217         if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
218                 DPAA2_PMD_WARN("\tmc_get_soc_version failed");
219
220         if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
221                 DPAA2_PMD_WARN("\tmc_get_version failed");
222
223         ret = snprintf(fw_version, fw_size,
224                        "%x-%d.%d.%d",
225                        mc_plat_info.svr,
226                        mc_ver_info.major,
227                        mc_ver_info.minor,
228                        mc_ver_info.revision);
229         if (ret < 0)
230                 return -EINVAL;
231
232         ret += 1; /* add the size of '\0' */
233         if (fw_size < (size_t)ret)
234                 return ret;
235         else
236                 return 0;
237 }
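/* Example caller (generic ethdev API, not specific to this driver): per the
 * contract implemented above, a positive return value is the buffer size
 * the caller should retry with.
 */
#if 0	/* illustrative sketch */
static void
example_print_fw_version(uint16_t port_id)
{
	char fw[64];
	int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

	if (ret == 0)
		printf("port %u fw: %s\n", port_id, fw);
	else if (ret > 0)
		printf("port %u: need a %d-byte buffer\n", port_id, ret);
}
#endif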
238
239 static int
240 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
241 {
242         struct dpaa2_dev_priv *priv = dev->data->dev_private;
243
244         PMD_INIT_FUNC_TRACE();
245
246         dev_info->max_mac_addrs = priv->max_mac_filters;
247         dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
248         dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
249         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
250         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
251         dev_info->rx_offload_capa = dev_rx_offloads_sup |
252                                         dev_rx_offloads_nodis;
253         dev_info->tx_offload_capa = dev_tx_offloads_sup |
254                                         dev_tx_offloads_nodis;
255         dev_info->speed_capa = ETH_LINK_SPEED_1G |
256                         ETH_LINK_SPEED_2_5G |
257                         ETH_LINK_SPEED_10G;
258
259         dev_info->max_hash_mac_addrs = 0;
260         dev_info->max_vfs = 0;
261         dev_info->max_vmdq_pools = ETH_16_POOLS;
262         dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
263
264         dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
265         /* use the same size as Rx for best performance */
266         dev_info->default_txportconf.burst_size = dpaa2_dqrr_size;
267
268         dev_info->default_rxportconf.nb_queues = 1;
269         dev_info->default_txportconf.nb_queues = 1;
270         dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD;
271         dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
272
273         if (dpaa2_svr_family == SVR_LX2160A) {
274                 dev_info->speed_capa |= ETH_LINK_SPEED_25G |
275                                 ETH_LINK_SPEED_40G |
276                                 ETH_LINK_SPEED_50G |
277                                 ETH_LINK_SPEED_100G;
278         }
279
280         return 0;
281 }
282
283 static int
284 dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
285                         __rte_unused uint16_t queue_id,
286                         struct rte_eth_burst_mode *mode)
287 {
288         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
289         int ret = -EINVAL;
290         unsigned int i;
291         const struct burst_info {
292                 uint64_t flags;
293                 const char *output;
294         } rx_offload_map[] = {
295                         {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
296                         {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
297                         {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
298                         {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
299                         {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
300                         {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
301                         {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
302                         {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
303                         {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
304                         {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
305         };
306
307         /* Update Rx offload info */
308         for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
309                 if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) {
310                         snprintf(mode->info, sizeof(mode->info), "%s",
311                                 rx_offload_map[i].output);
312                         ret = 0;
313                         break;
314                 }
315         }
316         return ret;
317 }
318
319 static int
320 dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
321                         __rte_unused uint16_t queue_id,
322                         struct rte_eth_burst_mode *mode)
323 {
324         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
325         int ret = -EINVAL;
326         unsigned int i;
327         const struct burst_info {
328                 uint64_t flags;
329                 const char *output;
330         } tx_offload_map[] = {
331                         {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
332                         {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
333                         {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
334                         {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
335                         {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
336                         {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
337                         {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
338                         {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
339                         {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
340         };
341
342         /* Update Tx offload info */
343         for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
344                 if (eth_conf->txmode.offloads & tx_offload_map[i].flags) {
345                         snprintf(mode->info, sizeof(mode->info), "%s",
346                                 tx_offload_map[i].output);
347                         ret = 0;
348                         break;
349                 }
350         }
351         return ret;
352 }
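/* Example caller (generic ethdev API): how an application queries the
 * burst-mode strings produced by the two callbacks above.
 */
#if 0	/* illustrative sketch */
static void
example_show_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("Rx burst mode:%s\n", mode.info);
	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("Tx burst mode:%s\n", mode.info);
}
#endif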
353
354 static int
355 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
356 {
357         struct dpaa2_dev_priv *priv = dev->data->dev_private;
358         uint16_t dist_idx;
359         uint32_t vq_id;
360         uint8_t num_rxqueue_per_tc;
361         struct dpaa2_queue *mc_q, *mcq;
362         uint32_t tot_queues;
363         int i;
364         struct dpaa2_queue *dpaa2_q;
365
366         PMD_INIT_FUNC_TRACE();
367
368         num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
369         if (priv->flags & DPAA2_TX_CONF_ENABLE)
370                 tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues;
371         else
372                 tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
373         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
374                           RTE_CACHE_LINE_SIZE);
375         if (!mc_q) {
376                 DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
377                 return -1;
378         }
379
380         for (i = 0; i < priv->nb_rx_queues; i++) {
381                 mc_q->eth_data = dev->data;
382                 priv->rx_vq[i] = mc_q++;
383                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
384                 dpaa2_q->q_storage = rte_malloc("dq_storage",
385                                         sizeof(struct queue_storage_info_t),
386                                         RTE_CACHE_LINE_SIZE);
387                 if (!dpaa2_q->q_storage)
388                         goto fail;
389
390                 memset(dpaa2_q->q_storage, 0,
391                        sizeof(struct queue_storage_info_t));
392                 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
393                         goto fail;
394         }
395
396         if (dpaa2_enable_err_queue) {
397                 priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
398                         sizeof(struct dpaa2_queue), 0);
399
400                 dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
401                 dpaa2_q->q_storage = rte_malloc("err_dq_storage",
402                                         sizeof(struct queue_storage_info_t) *
403                                         RTE_MAX_LCORE,
404                                         RTE_CACHE_LINE_SIZE);
405                 if (!dpaa2_q->q_storage)
406                         goto fail;
407
408                 memset(dpaa2_q->q_storage, 0,
409                        sizeof(struct queue_storage_info_t));
410                 for (i = 0; i < RTE_MAX_LCORE; i++)
411                         if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
412                                 goto fail;
413         }
414
415         for (i = 0; i < priv->nb_tx_queues; i++) {
416                 mc_q->eth_data = dev->data;
417                 mc_q->flow_id = 0xffff;
418                 priv->tx_vq[i] = mc_q++;
419                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
420                 dpaa2_q->cscn = rte_malloc(NULL,
421                                            sizeof(struct qbman_result), 16);
422                 if (!dpaa2_q->cscn)
423                         goto fail_tx;
424         }
425
426         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
427                 /* Setup Tx confirmation queues */
428                 for (i = 0; i < priv->nb_tx_queues; i++) {
429                         mc_q->eth_data = dev->data;
430                         mc_q->tc_index = i;
431                         mc_q->flow_id = 0;
432                         priv->tx_conf_vq[i] = mc_q++;
433                         dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
434                         dpaa2_q->q_storage =
435                                 rte_malloc("dq_storage",
436                                         sizeof(struct queue_storage_info_t),
437                                         RTE_CACHE_LINE_SIZE);
438                         if (!dpaa2_q->q_storage)
439                                 goto fail_tx_conf;
440
441                         memset(dpaa2_q->q_storage, 0,
442                                sizeof(struct queue_storage_info_t));
443                         if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
444                                 goto fail_tx_conf;
445                 }
446         }
447
448         vq_id = 0;
449         for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
450                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
451                 mcq->tc_index = dist_idx / num_rxqueue_per_tc;
452                 mcq->flow_id = dist_idx % num_rxqueue_per_tc;
453                 vq_id++;
454         }
455
456         return 0;
457 fail_tx_conf:
458         i -= 1;
459         while (i >= 0) {
460                 dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
461                 rte_free(dpaa2_q->q_storage);
462                 priv->tx_conf_vq[i--] = NULL;
463         }
464         i = priv->nb_tx_queues;
465 fail_tx:
466         i -= 1;
467         while (i >= 0) {
468                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
469                 rte_free(dpaa2_q->cscn);
470                 priv->tx_vq[i--] = NULL;
471         }
472         i = priv->nb_rx_queues;
473 fail:
474         i -= 1;
475         mc_q = priv->rx_vq[0];
476         while (i >= 0) {
477                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
478                 dpaa2_free_dq_storage(dpaa2_q->q_storage);
479                 rte_free(dpaa2_q->q_storage);
480                 priv->rx_vq[i--] = NULL;
481         }
482
483         if (dpaa2_enable_err_queue) {
484                 dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
485                 if (dpaa2_q->q_storage)
486                         dpaa2_free_dq_storage(dpaa2_q->q_storage);
487                 rte_free(dpaa2_q->q_storage);
488         }
489
490         rte_free(mc_q);
491         return -1;
492 }
493
494 static void
495 dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
496 {
497         struct dpaa2_dev_priv *priv = dev->data->dev_private;
498         struct dpaa2_queue *dpaa2_q;
499         int i;
500
501         PMD_INIT_FUNC_TRACE();
502
503         /* rx_vq[0] is the base of the single allocation for all queues */
504         if (priv->rx_vq[0]) {
505                 /* cleaning up queue storage */
506                 for (i = 0; i < priv->nb_rx_queues; i++) {
507                         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
508                         if (dpaa2_q->q_storage)
509                                 rte_free(dpaa2_q->q_storage);
510                 }
511                 /* cleanup tx queue cscn */
512                 for (i = 0; i < priv->nb_tx_queues; i++) {
513                         dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
514                         rte_free(dpaa2_q->cscn);
515                 }
516                 if (priv->flags & DPAA2_TX_CONF_ENABLE) {
517                         /* cleanup tx conf queue storage */
518                         for (i = 0; i < priv->nb_tx_queues; i++) {
519                                 dpaa2_q = (struct dpaa2_queue *)
520                                                 priv->tx_conf_vq[i];
521                                 rte_free(dpaa2_q->q_storage);
522                         }
523                 }
524                 /* free memory for all queues (Rx + Tx) */
525                 rte_free(priv->rx_vq[0]);
526                 priv->rx_vq[0] = NULL;
527         }
528 }
529
530 static int
531 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
532 {
533         struct dpaa2_dev_priv *priv = dev->data->dev_private;
534         struct fsl_mc_io *dpni = dev->process_private;
535         struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
536         uint64_t rx_offloads = eth_conf->rxmode.offloads;
537         uint64_t tx_offloads = eth_conf->txmode.offloads;
538         int rx_l3_csum_offload = false;
539         int rx_l4_csum_offload = false;
540         int tx_l3_csum_offload = false;
541         int tx_l4_csum_offload = false;
542         int ret, tc_index;
543
544         PMD_INIT_FUNC_TRACE();
545
546         /* Rx offloads which are enabled by default */
547         if (dev_rx_offloads_nodis & ~rx_offloads) {
548                 DPAA2_PMD_INFO(
549                 "Some of rx offloads enabled by default - requested 0x%" PRIx64
550                 " fixed are 0x%" PRIx64,
551                 rx_offloads, dev_rx_offloads_nodis);
552         }
553
554         /* Tx offloads which are enabled by default */
555         if (dev_tx_offloads_nodis & ~tx_offloads) {
556                 DPAA2_PMD_INFO(
557                 "Some of tx offloads enabled by default - requested 0x%" PRIx64
558                 " fixed are 0x%" PRIx64,
559                 tx_offloads, dev_tx_offloads_nodis);
560         }
561
562         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
563                 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
564                         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
565                                 priv->token, eth_conf->rxmode.max_rx_pkt_len
566                                 - RTE_ETHER_CRC_LEN);
567                         if (ret) {
568                                 DPAA2_PMD_ERR(
569                                         "Unable to set mtu. check config");
570                                 return ret;
571                         }
572                         dev->data->mtu =
573                                 dev->data->dev_conf.rxmode.max_rx_pkt_len -
574                                 RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
575                                 VLAN_TAG_SIZE;
576                 } else {
577                         return -1;
578                 }
579         }
580
581         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
582                 for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
583                         ret = dpaa2_setup_flow_dist(dev,
584                                         eth_conf->rx_adv_conf.rss_conf.rss_hf,
585                                         tc_index);
586                         if (ret) {
587                                 DPAA2_PMD_ERR(
588                                         "Unable to set flow distribution on tc%d."
589                                         "Check queue config", tc_index);
590                                 return ret;
591                         }
592                 }
593         }
594
595         if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
596                 rx_l3_csum_offload = true;
597
598         if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
599                 (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
600                 (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
601                 rx_l4_csum_offload = true;
602
603         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
604                                DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
605         if (ret) {
606                 DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
607                 return ret;
608         }
609
610         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
611                                DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
612         if (ret) {
613                 DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
614                 return ret;
615         }
616
617 #if !defined(RTE_LIBRTE_IEEE1588)
618         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
619 #endif
620         {
621                 ret = rte_mbuf_dyn_rx_timestamp_register(
622                                 &dpaa2_timestamp_dynfield_offset,
623                                 &dpaa2_timestamp_rx_dynflag);
624                 if (ret != 0) {
625                         DPAA2_PMD_ERR("Error to register timestamp field/flag");
626                         return -rte_errno;
627                 }
628                 dpaa2_enable_ts[dev->data->port_id] = true;
629         }
630
631         if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
632                 tx_l3_csum_offload = true;
633
634         if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
635                 (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
636                 (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
637                 tx_l4_csum_offload = true;
638
639         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
640                                DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
641         if (ret) {
642                 DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
643                 return ret;
644         }
645
646         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
647                                DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
648         if (ret) {
649                 DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
650                 return ret;
651         }
652
653         /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
654          * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
655          * to 0 for LS2 in the hardware thus disabling data/annotation
656          * stashing. For LX2 this is fixed in hardware and thus hash result and
657          * parse results can be received in FD using this option.
658          */
659         if (dpaa2_svr_family == SVR_LX2160A) {
660                 ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
661                                        DPNI_FLCTYPE_HASH, true);
662                 if (ret) {
663                         DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
664                         return ret;
665                 }
666         }
667
668         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
669                 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
670
671         dpaa2_tm_init(dev);
672
673         return 0;
674 }
675
676 /* Function to setup RX flow information. It contains traffic class ID,
677  * flow ID, destination configuration etc.
678  */
679 static int
680 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
681                          uint16_t rx_queue_id,
682                          uint16_t nb_rx_desc,
683                          unsigned int socket_id __rte_unused,
684                          const struct rte_eth_rxconf *rx_conf,
685                          struct rte_mempool *mb_pool)
686 {
687         struct dpaa2_dev_priv *priv = dev->data->dev_private;
688         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
689         struct dpaa2_queue *dpaa2_q;
690         struct dpni_queue cfg;
691         uint8_t options = 0;
692         uint8_t flow_id;
693         uint32_t bpid;
694         int i, ret;
695
696         PMD_INIT_FUNC_TRACE();
697
698         DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
699                         dev, rx_queue_id, mb_pool, rx_conf);
700
701         /* Rx deferred start is not supported */
702         if (rx_conf->rx_deferred_start) {
703                 DPAA2_PMD_ERR("%p:Rx deferred start not supported",
704                                 (void *)dev);
705                 return -EINVAL;
706         }
707
708         if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
709                 bpid = mempool_to_bpid(mb_pool);
710                 ret = dpaa2_attach_bp_list(priv,
711                                            rte_dpaa2_bpid_info[bpid].bp_list);
712                 if (ret)
713                         return ret;
714         }
715         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
716         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
717         dpaa2_q->bp_array = rte_dpaa2_bpid_info;
718         dpaa2_q->nb_desc = UINT16_MAX;
719         dpaa2_q->offloads = rx_conf->offloads;
720
721         /* Get the flow id from the given VQ id */
722         flow_id = dpaa2_q->flow_id;
723         memset(&cfg, 0, sizeof(struct dpni_queue));
724
725         options = options | DPNI_QUEUE_OPT_USER_CTX;
726         cfg.user_context = (size_t)(dpaa2_q);
727
728         /* Check if a private CGR is available. */
729         for (i = 0; i < priv->max_cgs; i++) {
730                 if (!priv->cgid_in_use[i]) {
731                         priv->cgid_in_use[i] = 1;
732                         break;
733                 }
734         }
735
736         if (i < priv->max_cgs) {
737                 options |= DPNI_QUEUE_OPT_SET_CGID;
738                 cfg.cgid = i;
739                 dpaa2_q->cgid = cfg.cgid;
740         } else {
741                 dpaa2_q->cgid = 0xff;
742         }
743
744         /* if LS2088 or a rev2 device, enable stashing */
745
746         if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
747                 options |= DPNI_QUEUE_OPT_FLC;
748                 cfg.flc.stash_control = true;
749                 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
750                 /* The low 6 bits of FLC select stashing, two bits per
751                  * field, in the order DS AS CS (data, annotation,
752                  * context). 01 01 00 (0x14) enables one cache line of
753                  * data and one line of annotation stashing.
754                  * For LX2, the setting should be 01 00 00 (0x10).
755                  */
756                 if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
757                         cfg.flc.value |= 0x10;
758                 else
759                         cfg.flc.value |= 0x14;
760         }
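                /* Illustrative note on the FLC stashing encoding used in the
                 * block above; FLC_STASH() is a hypothetical helper, not part
                 * of the MC API:
                 *   FLC_STASH(ds, as, cs) == ((ds) << 4) | ((as) << 2) | (cs)
                 *   FLC_STASH(1, 1, 0) == 0x14 (LS2)
                 *   FLC_STASH(1, 0, 0) == 0x10 (LX2)
                 */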
761         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
762                              dpaa2_q->tc_index, flow_id, options, &cfg);
763         if (ret) {
764                 DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
765                 return -1;
766         }
767
768         if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
769                 struct dpni_taildrop taildrop;
770
771                 taildrop.enable = 1;
772                 dpaa2_q->nb_desc = nb_rx_desc;
773                 /* A private CGR uses frame-based tail drop with nb_rx_desc
774                  * as the threshold; all other cases use standard byte-based
775                  * tail drop. There is no HW restriction, but the number of
776                  * CGRs is limited, hence this fallback.
777                  */
778                 if (dpaa2_q->cgid != 0xff) {
779                         /* enable per-Rx-queue congestion-group tail drop */
780                         taildrop.threshold = nb_rx_desc;
781                         taildrop.units = DPNI_CONGESTION_UNIT_FRAMES;
782                         taildrop.oal = 0;
783                         DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d",
784                                         rx_queue_id);
785                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
786                                                 DPNI_CP_CONGESTION_GROUP,
787                                                 DPNI_QUEUE_RX,
788                                                 dpaa2_q->tc_index,
789                                                 dpaa2_q->cgid, &taildrop);
790                 } else {
791                         /* enable per-Rx-queue byte-based tail drop */
792                         taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q;
793                         taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
794                         taildrop.oal = CONG_RX_OAL;
795                         DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d",
796                                         rx_queue_id);
797                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
798                                                 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
799                                                 dpaa2_q->tc_index, flow_id,
800                                                 &taildrop);
801                 }
802                 if (ret) {
803                         DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
804                                       ret);
805                         return -1;
806                 }
807         } else { /* Disable tail Drop */
808                 struct dpni_taildrop taildrop = {0};
809                 DPAA2_PMD_INFO("Tail drop is disabled on queue");
810
811                 taildrop.enable = 0;
812                 if (dpaa2_q->cgid != 0xff) {
813                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
814                                         DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX,
815                                         dpaa2_q->tc_index,
816                                         dpaa2_q->cgid, &taildrop);
817                 } else {
818                         ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
819                                         DPNI_CP_QUEUE, DPNI_QUEUE_RX,
820                                         dpaa2_q->tc_index, flow_id, &taildrop);
821                 }
822                 if (ret) {
823                         DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
824                                       ret);
825                         return -1;
826                 }
827         }
828
829         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
830         return 0;
831 }
832
833 static int
834 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
835                          uint16_t tx_queue_id,
836                          uint16_t nb_tx_desc,
837                          unsigned int socket_id __rte_unused,
838                          const struct rte_eth_txconf *tx_conf)
839 {
840         struct dpaa2_dev_priv *priv = dev->data->dev_private;
841         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
842                 priv->tx_vq[tx_queue_id];
843         struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *)
844                 priv->tx_conf_vq[tx_queue_id];
845         struct fsl_mc_io *dpni = dev->process_private;
846         struct dpni_queue tx_conf_cfg;
847         struct dpni_queue tx_flow_cfg;
848         uint8_t options = 0, flow_id;
849         struct dpni_queue_id qid;
850         uint32_t tc_id;
851         int ret;
852
853         PMD_INIT_FUNC_TRACE();
854
855         /* Tx deferred start is not supported */
856         if (tx_conf->tx_deferred_start) {
857                 DPAA2_PMD_ERR("%p:Tx deferred start not supported",
858                                 (void *)dev);
859                 return -EINVAL;
860         }
861
862         dpaa2_q->nb_desc = UINT16_MAX;
863         dpaa2_q->offloads = tx_conf->offloads;
864
865         /* Return if queue already configured */
866         if (dpaa2_q->flow_id != 0xffff) {
867                 dev->data->tx_queues[tx_queue_id] = dpaa2_q;
868                 return 0;
869         }
870
871         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
872         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
873
874         tc_id = tx_queue_id;
875         flow_id = 0;
876
877         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
878                         tc_id, flow_id, options, &tx_flow_cfg);
879         if (ret) {
880                 DPAA2_PMD_ERR("Error in setting the tx flow: "
881                         "tc_id=%d, flow=%d err=%d",
882                         tc_id, flow_id, ret);
883                 return -1;
884         }
885
886         dpaa2_q->flow_id = flow_id;
887
888         if (tx_queue_id == 0) {
889                 /* Set tx-conf and error configuration */
890                 if (priv->flags & DPAA2_TX_CONF_ENABLE)
891                         ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
892                                                             priv->token,
893                                                             DPNI_CONF_AFFINE);
894                 else
895                         ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
896                                                             priv->token,
897                                                             DPNI_CONF_DISABLE);
898                 if (ret) {
899                         DPAA2_PMD_ERR("Error in set tx conf mode settings: "
900                                       "err=%d", ret);
901                         return -1;
902                 }
903         }
904         dpaa2_q->tc_index = tc_id;
905
906         ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
907                              DPNI_QUEUE_TX, dpaa2_q->tc_index,
908                              dpaa2_q->flow_id, &tx_flow_cfg, &qid);
909         if (ret) {
910                 DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
911                 return -1;
912         }
913         dpaa2_q->fqid = qid.fqid;
914
915         if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
916                 struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
917
918                 dpaa2_q->nb_desc = nb_tx_desc;
919
920                 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
921                 cong_notif_cfg.threshold_entry = nb_tx_desc;
922                 /* Notify that the queue is not congested when the data
923                  * in the queue falls below this threshold (90% of value).
924                  */
925                 cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
926                 cong_notif_cfg.message_ctx = 0;
927                 cong_notif_cfg.message_iova =
928                                 (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
929                 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
930                 cong_notif_cfg.notification_mode =
931                                          DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
932                                          DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
933                                          DPNI_CONG_OPT_COHERENT_WRITE;
934                 cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
935
936                 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
937                                                        priv->token,
938                                                        DPNI_QUEUE_TX,
939                                                        tc_id,
940                                                        &cong_notif_cfg);
941                 if (ret) {
942                         DPAA2_PMD_ERR(
943                            "Error in setting tx congestion notification: "
944                            "err=%d", ret);
945                         return -ret;
946                 }
947         }
948         dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
949         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
950
951         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
952                 dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q;
953                 options = options | DPNI_QUEUE_OPT_USER_CTX;
954                 tx_conf_cfg.user_context = (size_t)(dpaa2_q);
955                 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
956                              DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
957                              dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
958                 if (ret) {
959                         DPAA2_PMD_ERR("Error in setting the tx conf flow: "
960                               "tc_index=%d, flow=%d err=%d",
961                               dpaa2_tx_conf_q->tc_index,
962                               dpaa2_tx_conf_q->flow_id, ret);
963                         return -1;
964                 }
965
966                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
967                              DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
968                              dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
969                 if (ret) {
970                         DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
971                         return -1;
972                 }
973                 dpaa2_tx_conf_q->fqid = qid.fqid;
974         }
975         return 0;
976 }
977
978 static void
979 dpaa2_dev_rx_queue_release(void *q __rte_unused)
980 {
981         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
982         struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
983         struct fsl_mc_io *dpni =
984                 (struct fsl_mc_io *)priv->eth_dev->process_private;
985         uint8_t options = 0;
986         int ret;
987         struct dpni_queue cfg;
988
989         memset(&cfg, 0, sizeof(struct dpni_queue));
990         PMD_INIT_FUNC_TRACE();
991         if (dpaa2_q->cgid != 0xff) {
992                 options = DPNI_QUEUE_OPT_CLEAR_CGID;
993                 cfg.cgid = dpaa2_q->cgid;
994
995                 ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
996                                      DPNI_QUEUE_RX,
997                                      dpaa2_q->tc_index, dpaa2_q->flow_id,
998                                      options, &cfg);
999                 if (ret)
1000                         DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d",
1001                                         dpaa2_q->fqid, ret);
1002                 priv->cgid_in_use[dpaa2_q->cgid] = 0;
1003                 dpaa2_q->cgid = 0xff;
1004         }
1005 }
1006
1007 static void
1008 dpaa2_dev_tx_queue_release(void *q __rte_unused)
1009 {
1010         PMD_INIT_FUNC_TRACE();
1011 }
1012
1013 static uint32_t
1014 dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1015 {
1016         int32_t ret;
1017         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1018         struct dpaa2_queue *dpaa2_q;
1019         struct qbman_swp *swp;
1020         struct qbman_fq_query_np_rslt state;
1021         uint32_t frame_cnt = 0;
1022
1023         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1024                 ret = dpaa2_affine_qbman_swp();
1025                 if (ret) {
1026                         DPAA2_PMD_ERR(
1027                                 "Failed to allocate IO portal, tid: %d\n",
1028                                 rte_gettid());
1029                         return -EINVAL;
1030                 }
1031         }
1032         swp = DPAA2_PER_LCORE_PORTAL;
1033
1034         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
1035
1036         if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
1037                 frame_cnt = qbman_fq_state_frame_count(&state);
1038                 DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
1039                                 rx_queue_id, frame_cnt);
1040         }
1041         return frame_cnt;
1042 }
1043
1044 static const uint32_t *
1045 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
1046 {
1047         static const uint32_t ptypes[] = {
1048                 /* TODO: add more types */
1049                 RTE_PTYPE_L2_ETHER,
1050                 RTE_PTYPE_L3_IPV4,
1051                 RTE_PTYPE_L3_IPV4_EXT,
1052                 RTE_PTYPE_L3_IPV6,
1053                 RTE_PTYPE_L3_IPV6_EXT,
1054                 RTE_PTYPE_L4_TCP,
1055                 RTE_PTYPE_L4_UDP,
1056                 RTE_PTYPE_L4_SCTP,
1057                 RTE_PTYPE_L4_ICMP,
1058                 RTE_PTYPE_UNKNOWN
1059         };
1060
1061         if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
1062                 dev->rx_pkt_burst == dpaa2_dev_rx ||
1063                 dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
1064                 return ptypes;
1065         return NULL;
1066 }
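/* Example caller (generic ethdev API): retrieving the ptype list advertised
 * above.
 */
#if 0	/* illustrative sketch */
static void
example_show_ptypes(uint16_t port_id)
{
	uint32_t ptypes[16];
	int i, num;

	/* num may exceed the array size; only min(num, dim) entries are filled */
	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					       ptypes, RTE_DIM(ptypes));
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
		printf("ptype 0x%08x\n", ptypes[i]);
}
#endif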
1067
1068 /**
1069  * DPAA2 link interrupt handler
1070  *
1071  * @param param
1072  *  The address of the parameter (struct rte_eth_dev *) registered before.
1073  *
1074  * @return
1075  *  void
1076  */
1077 static void
1078 dpaa2_interrupt_handler(void *param)
1079 {
1080         struct rte_eth_dev *dev = param;
1081         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1082         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1083         int ret;
1084         int irq_index = DPNI_IRQ_INDEX;
1085         unsigned int status = 0, clear = 0;
1086
1087         PMD_INIT_FUNC_TRACE();
1088
1089         if (dpni == NULL) {
1090                 DPAA2_PMD_ERR("dpni is NULL");
1091                 return;
1092         }
1093
1094         ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
1095                                   irq_index, &status);
1096         if (unlikely(ret)) {
1097                 DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
1098                 clear = 0xffffffff;
1099                 goto out;
1100         }
1101
1102         if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
1103                 clear = DPNI_IRQ_EVENT_LINK_CHANGED;
1104                 dpaa2_dev_link_update(dev, 0);
1105                 /* calling all the apps registered for link status event */
1106                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1107         }
1108 out:
1109         ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
1110                                     irq_index, clear);
1111         if (unlikely(ret))
1112                 DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
1113 }
1114
1115 static int
1116 dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
1117 {
1118         int err = 0;
1119         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1120         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1121         int irq_index = DPNI_IRQ_INDEX;
1122         unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;
1123
1124         PMD_INIT_FUNC_TRACE();
1125
1126         err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
1127                                 irq_index, mask);
1128         if (err < 0) {
1129                 DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
1130                               strerror(-err));
1131                 return err;
1132         }
1133
1134         err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
1135                                   irq_index, enable);
1136         if (err < 0)
1137                 DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
1138                               strerror(-err));
1139
1140         return err;
1141 }
1142
1143 static int
1144 dpaa2_dev_start(struct rte_eth_dev *dev)
1145 {
1146         struct rte_device *rdev = dev->device;
1147         struct rte_dpaa2_device *dpaa2_dev;
1148         struct rte_eth_dev_data *data = dev->data;
1149         struct dpaa2_dev_priv *priv = data->dev_private;
1150         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1151         struct dpni_queue cfg;
1152         struct dpni_error_cfg   err_cfg;
1153         uint16_t qdid;
1154         struct dpni_queue_id qid;
1155         struct dpaa2_queue *dpaa2_q;
1156         int ret, i;
1157         struct rte_intr_handle *intr_handle;
1158
1159         dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
1160         intr_handle = &dpaa2_dev->intr_handle;
1161
1162         PMD_INIT_FUNC_TRACE();
1163
1164         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1165         if (ret) {
1166                 DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
1167                               priv->hw_id, ret);
1168                 return ret;
1169         }
1170
1171         /* Power up the phy. Needed to make the link go UP */
1172         dpaa2_dev_set_link_up(dev);
1173
1174         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
1175                             DPNI_QUEUE_TX, &qdid);
1176         if (ret) {
1177                 DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
1178                 return ret;
1179         }
1180         priv->qdid = qdid;
1181
1182         for (i = 0; i < data->nb_rx_queues; i++) {
1183                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
1184                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1185                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
1186                                        dpaa2_q->flow_id, &cfg, &qid);
1187                 if (ret) {
1188                         DPAA2_PMD_ERR("Error in getting flow information: "
1189                                       "err=%d", ret);
1190                         return ret;
1191                 }
1192                 dpaa2_q->fqid = qid.fqid;
1193         }
1194
1195         if (dpaa2_enable_err_queue) {
1196                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
1197                                      DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
1198                 if (ret) {
1199                         DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
1200                                                 ret);
1201                         return ret;
1202                 }
1203                 dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
1204                 dpaa2_q->fqid = qid.fqid;
1205                 dpaa2_q->eth_data = dev->data;
1206
1207                 err_cfg.errors =  DPNI_ERROR_DISC;
1208                 err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
1209         } else {
1210                 /* Checksum errors: send them on the normal path
1211                  * and flag them in the annotation.
1212                  */
1213                 err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
1214
1215                 /* packets with parse errors are not to be dropped */
1216                 err_cfg.errors |= DPNI_ERROR_PHE;
1217
1218                 err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
1219         }
1220         err_cfg.set_frame_annotation = true;
1221
1222         ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
1223                                        priv->token, &err_cfg);
1224         if (ret) {
1225                 DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
1226                               ret);
1227                 return ret;
1228         }
1229
1230         /* if the interrupts were configured on this device */
1231         if (intr_handle && (intr_handle->fd) &&
1232             (dev->data->dev_conf.intr_conf.lsc != 0)) {
1233                 /* Registering LSC interrupt handler */
1234                 rte_intr_callback_register(intr_handle,
1235                                            dpaa2_interrupt_handler,
1236                                            (void *)dev);
1237
1238                 /* enable vfio intr/eventfd mapping.
1239                  * Interrupt index 0 is required, so we cannot use
1240                  * rte_intr_enable.
1241                  */
1242                 rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);
1243
1244                 /* enable dpni_irqs */
1245                 dpaa2_eth_setup_irqs(dev, 1);
1246         }
1247
1248         /* Change the tx burst function if ordered queues are used */
1249         if (priv->en_ordered)
1250                 dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
1251
1252         return 0;
1253 }
1254
1255 /**
1256  *  This routine disables all traffic on the adapter by issuing a
1257  *  global reset on the MAC.
1258  */
1259 static int
1260 dpaa2_dev_stop(struct rte_eth_dev *dev)
1261 {
1262         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1263         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1264         int ret;
1265         struct rte_eth_link link;
1266         struct rte_intr_handle *intr_handle = dev->intr_handle;
1267
1268         PMD_INIT_FUNC_TRACE();
1269
1270         /* reset interrupt callback  */
1271         if (intr_handle && (intr_handle->fd) &&
1272             (dev->data->dev_conf.intr_conf.lsc != 0)) {
1273                 /*disable dpni irqs */
1274                 dpaa2_eth_setup_irqs(dev, 0);
1275
1276                 /* disable vfio intr before callback unregister */
1277                 rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);
1278
1279                 /* Unregistering LSC interrupt handler */
1280                 rte_intr_callback_unregister(intr_handle,
1281                                              dpaa2_interrupt_handler,
1282                                              (void *)dev);
1283         }
1284
1285         dpaa2_dev_set_link_down(dev);
1286
1287         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
1288         if (ret) {
1289                 DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
1290                               ret, priv->hw_id);
1291                 return ret;
1292         }
1293
1294         /* clear the recorded link status */
1295         memset(&link, 0, sizeof(link));
1296         rte_eth_linkstatus_set(dev, &link);
1297
1298         return 0;
1299 }
1300
1301 static int
1302 dpaa2_dev_close(struct rte_eth_dev *dev)
1303 {
1304         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1305         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1306         int i, ret;
1307         struct rte_eth_link link;
1308
1309         PMD_INIT_FUNC_TRACE();
1310
1311         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1312                 return 0;
1313
1314         if (!dpni) {
1315                 DPAA2_PMD_WARN("Already closed or not started");
1316                 return -1;
1317         }
1318
1319         dpaa2_tm_deinit(dev);
1320         dpaa2_flow_clean(dev);
1321         /* Clean the device first */
1322         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
1323         if (ret) {
1324                 DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
1325                 return -1;
1326         }
1327
1328         memset(&link, 0, sizeof(link));
1329         rte_eth_linkstatus_set(dev, &link);
1330
1331         /* Free private queues memory */
1332         dpaa2_free_rx_tx_queues(dev);
1333         /* Close the device at the underlying layer */
1334         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
1335         if (ret) {
1336                 DPAA2_PMD_ERR("Failure closing dpni device with err code %d",
1337                               ret);
1338         }
1339
1340         /* Free the allocated memory for ethernet private data and dpni */
1341         priv->hw = NULL;
1342         dev->process_private = NULL;
1343         rte_free(dpni);
1344
1345         for (i = 0; i < MAX_TCS; i++)
1346                 rte_free((void *)(size_t)priv->extract.tc_extract_param[i]);
1347
1348         if (priv->extract.qos_extract_param)
1349                 rte_free((void *)(size_t)priv->extract.qos_extract_param);
1350
1351         DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name);
1352         return 0;
1353 }
1354
1355 static int
1356 dpaa2_dev_promiscuous_enable(
1357                 struct rte_eth_dev *dev)
1358 {
1359         int ret;
1360         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1361         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1362
1363         PMD_INIT_FUNC_TRACE();
1364
1365         if (dpni == NULL) {
1366                 DPAA2_PMD_ERR("dpni is NULL");
1367                 return -ENODEV;
1368         }
1369
1370         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1371         if (ret < 0)
1372                 DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
1373
1374         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1375         if (ret < 0)
1376                 DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
1377
1378         return ret;
1379 }
1380
1381 static int
1382 dpaa2_dev_promiscuous_disable(
1383                 struct rte_eth_dev *dev)
1384 {
1385         int ret;
1386         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1387         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1388
1389         PMD_INIT_FUNC_TRACE();
1390
1391         if (dpni == NULL) {
1392                 DPAA2_PMD_ERR("dpni is NULL");
1393                 return -ENODEV;
1394         }
1395
1396         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1397         if (ret < 0)
1398                 DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
1399
1400         if (dev->data->all_multicast == 0) {
1401                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
1402                                                  priv->token, false);
1403                 if (ret < 0)
1404                         DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
1405                                       ret);
1406         }
1407
1408         return ret;
1409 }
1410
1411 static int
1412 dpaa2_dev_allmulticast_enable(
1413                 struct rte_eth_dev *dev)
1414 {
1415         int ret;
1416         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1417         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1418
1419         PMD_INIT_FUNC_TRACE();
1420
1421         if (dpni == NULL) {
1422                 DPAA2_PMD_ERR("dpni is NULL");
1423                 return -ENODEV;
1424         }
1425
1426         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
1427         if (ret < 0)
1428                 DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
1429
1430         return ret;
1431 }
1432
1433 static int
1434 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
1435 {
1436         int ret;
1437         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1438         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1439
1440         PMD_INIT_FUNC_TRACE();
1441
1442         if (dpni == NULL) {
1443                 DPAA2_PMD_ERR("dpni is NULL");
1444                 return -ENODEV;
1445         }
1446
1447         /* Multicast promisc must stay enabled while promiscuous mode is on */
1448         if (dev->data->promiscuous == 1)
1449                 return 0;
1450
1451         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
1452         if (ret < 0)
1453                 DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
1454
1455         return ret;
1456 }
1457
1458 static int
1459 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1460 {
1461         int ret;
1462         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1463         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1464         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
1465                                 + VLAN_TAG_SIZE;
1466
1467         PMD_INIT_FUNC_TRACE();
1468
1469         if (dpni == NULL) {
1470                 DPAA2_PMD_ERR("dpni is NULL");
1471                 return -EINVAL;
1472         }
1473
1474         /* check that mtu is within the allowed range */
1475         if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
1476                 return -EINVAL;
1477
1478         if (frame_size > DPAA2_ETH_MAX_LEN)
1479                 dev->data->dev_conf.rxmode.offloads |=
1480                                                 DEV_RX_OFFLOAD_JUMBO_FRAME;
1481         else
1482                 dev->data->dev_conf.rxmode.offloads &=
1483                                                 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1484
1485         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1486
1487         /* Set the Max Rx frame length as 'mtu' +
1488          * Maximum Ethernet header length
1489          */
1490         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
1491                                         frame_size - RTE_ETHER_CRC_LEN);
1492         if (ret) {
1493                 DPAA2_PMD_ERR("Setting the max frame length failed");
1494                 return -1;
1495         }
1496         DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
1497         return 0;
1498 }
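/* Worked example (illustrative, assuming the usual 14/4/4 byte values for
 * RTE_ETHER_HDR_LEN, RTE_ETHER_CRC_LEN and VLAN_TAG_SIZE): an MTU of 1500
 * gives frame_size = 1500 + 14 + 4 + 4 = 1522, and the MC is programmed
 * with 1522 - 4 = 1518 as the maximum frame length, since the CRC is
 * excluded from the value passed to dpni_set_max_frame_length().
 */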
1499
1500 static int
1501 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
1502                        struct rte_ether_addr *addr,
1503                        __rte_unused uint32_t index,
1504                        __rte_unused uint32_t pool)
1505 {
1506         int ret;
1507         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1508         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1509
1510         PMD_INIT_FUNC_TRACE();
1511
1512         if (dpni == NULL) {
1513                 DPAA2_PMD_ERR("dpni is NULL");
1514                 return -ENODEV;
1515         }
1516
1517         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token,
1518                                 addr->addr_bytes, 0, 0, 0);
1519         if (ret)
1520                 DPAA2_PMD_ERR(
1521                         "error: Adding the MAC ADDR failed: err = %d", ret);
1522         return ret;
1523 }
1524
1525 static void
1526 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
1527                           uint32_t index)
1528 {
1529         int ret;
1530         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1531         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1532         struct rte_eth_dev_data *data = dev->data;
1533         struct rte_ether_addr *macaddr;
1534
1535         PMD_INIT_FUNC_TRACE();
1536
1537         macaddr = &data->mac_addrs[index];
1538
1539         if (dpni == NULL) {
1540                 DPAA2_PMD_ERR("dpni is NULL");
1541                 return;
1542         }
1543
1544         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
1545                                    priv->token, macaddr->addr_bytes);
1546         if (ret)
1547                 DPAA2_PMD_ERR(
1548                         "error: Removing the MAC ADDR failed: err = %d", ret);
1549 }
1550
1551 static int
1552 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
1553                        struct rte_ether_addr *addr)
1554 {
1555         int ret;
1556         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1557         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1558
1559         PMD_INIT_FUNC_TRACE();
1560
1561         if (dpni == NULL) {
1562                 DPAA2_PMD_ERR("dpni is NULL");
1563                 return -EINVAL;
1564         }
1565
1566         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
1567                                         priv->token, addr->addr_bytes);
1568
1569         if (ret)
1570                 DPAA2_PMD_ERR(
1571                         "error: Setting the MAC ADDR failed %d", ret);
1572
1573         return ret;
1574 }
1575
1576 static
1577 int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
1578                          struct rte_eth_stats *stats)
1579 {
1580         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1581         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1582         int32_t  retcode;
1583         uint8_t page0 = 0, page1 = 1, page2 = 2;
1584         union dpni_statistics value;
1585         int i;
1586         struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;
1587
1588         memset(&value, 0, sizeof(union dpni_statistics));
1589
1590         PMD_INIT_FUNC_TRACE();
1591
1592         if (!dpni) {
1593                 DPAA2_PMD_ERR("dpni is NULL");
1594                 return -EINVAL;
1595         }
1596
1597         if (!stats) {
1598                 DPAA2_PMD_ERR("stats is NULL");
1599                 return -EINVAL;
1600         }
1601
1602         /* Get counters from page_0 */
1603         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1604                                       page0, 0, &value);
1605         if (retcode)
1606                 goto err;
1607
1608         stats->ipackets = value.page_0.ingress_all_frames;
1609         stats->ibytes = value.page_0.ingress_all_bytes;
1610
1611         /* Get counters from page_1 */
1612         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1613                                       page1, 0, &value);
1614         if (retcode)
1615                 goto err;
1616
1617         stats->opackets = value.page_1.egress_all_frames;
1618         stats->obytes = value.page_1.egress_all_bytes;
1619
1620         /* Get counters from page_2 */
1621         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1622                                       page2, 0, &value);
1623         if (retcode)
1624                 goto err;
1625
1626         /* Ingress drop frame count due to configured rules */
1627         stats->ierrors = value.page_2.ingress_filtered_frames;
1628         /* Ingress drop frame count due to error */
1629         stats->ierrors += value.page_2.ingress_discarded_frames;
1630
1631         stats->oerrors = value.page_2.egress_discarded_frames;
1632         stats->imissed = value.page_2.ingress_nobuffer_discards;
1633
1634         /* Fill in per queue stats */
1635         for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1636                 (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
1637                 dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
1638                 dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
1639                 if (dpaa2_rxq)
1640                         stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
1641                 if (dpaa2_txq)
1642                         stats->q_opackets[i] = dpaa2_txq->tx_pkts;
1643
1644                 /* Byte counting is not implemented */
1645                 stats->q_ibytes[i]   = 0;
1646                 stats->q_obytes[i]   = 0;
1647         }
1648
1649         return 0;
1650
1651 err:
1652         DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
1653         return retcode;
1654 }
1655
1656 static int
1657 dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1658                      unsigned int n)
1659 {
1660         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1661         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1662         int32_t  retcode;
1663         union dpni_statistics value[5] = {};
1664         unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
1665
1666         if (n < num)
1667                 return num;
1668
1669         if (xstats == NULL)
1670                 return 0;
1671
1672         /* Get Counters from page_0*/
1673         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1674                                       0, 0, &value[0]);
1675         if (retcode)
1676                 goto err;
1677
1678         /* Get Counters from page_1*/
1679         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1680                                       1, 0, &value[1]);
1681         if (retcode)
1682                 goto err;
1683
1684         /* Get Counters from page_2*/
1685         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1686                                       2, 0, &value[2]);
1687         if (retcode)
1688                 goto err;
1689
1690         for (i = 0; i < priv->max_cgs; i++) {
1691                 if (!priv->cgid_in_use[i]) {
1692                         /* Get Counters from page_4*/
1693                         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW,
1694                                                       priv->token,
1695                                                       4, 0, &value[4]);
1696                         if (retcode)
1697                                 goto err;
1698                         break;
1699                 }
1700         }
1701
1702         for (i = 0; i < num; i++) {
1703                 xstats[i].id = i;
1704                 xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
1705                         raw.counter[dpaa2_xstats_strings[i].stats_id];
1706         }
1707         return i;
1708 err:
1709         DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
1710         return retcode;
1711 }
1712
1713 static int
1714 dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1715                        struct rte_eth_xstat_name *xstats_names,
1716                        unsigned int limit)
1717 {
1718         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1719
1720         if (limit < stat_cnt)
1721                 return stat_cnt;
1722
1723         if (xstats_names != NULL)
1724                 for (i = 0; i < stat_cnt; i++)
1725                         strlcpy(xstats_names[i].name,
1726                                 dpaa2_xstats_strings[i].name,
1727                                 sizeof(xstats_names[i].name));
1728
1729         return stat_cnt;
1730 }
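/* Illustrative application-side usage of the count/fill convention
 * implemented above (a sketch; error handling omitted):
 *
 *   int n = rte_eth_xstats_get_names(port_id, NULL, 0);  // query count
 *   struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *   rte_eth_xstats_get_names(port_id, names, n);         // fill names
 */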
1731
1732 static int
1733 dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1734                        uint64_t *values, unsigned int n)
1735 {
1736         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1737         uint64_t values_copy[stat_cnt];
1738
1739         if (!ids) {
1740                 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1741                 struct fsl_mc_io *dpni =
1742                         (struct fsl_mc_io *)dev->process_private;
1743                 int32_t  retcode;
1744                 union dpni_statistics value[5] = {};
1745
1746                 if (n < stat_cnt)
1747                         return stat_cnt;
1748
1749                 if (!values)
1750                         return 0;
1751
1752                 /* Get Counters from page_0*/
1753                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1754                                               0, 0, &value[0]);
1755                 if (retcode)
1756                         return 0;
1757
1758                 /* Get Counters from page_1*/
1759                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1760                                               1, 0, &value[1]);
1761                 if (retcode)
1762                         return 0;
1763
1764                 /* Get Counters from page_2*/
1765                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1766                                               2, 0, &value[2]);
1767                 if (retcode)
1768                         return 0;
1769
1770                 /* Get Counters from page_4*/
1771                 retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
1772                                               4, 0, &value[4]);
1773                 if (retcode)
1774                         return 0;
1775
1776                 for (i = 0; i < stat_cnt; i++) {
1777                         values[i] = value[dpaa2_xstats_strings[i].page_id].
1778                                 raw.counter[dpaa2_xstats_strings[i].stats_id];
1779                 }
1780                 return stat_cnt;
1781         }
1782
1783         dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);
1784
1785         for (i = 0; i < n; i++) {
1786                 if (ids[i] >= stat_cnt) {
1787                         DPAA2_PMD_ERR("xstats id value isn't valid");
1788                         return -1;
1789                 }
1790                 values[i] = values_copy[ids[i]];
1791         }
1792         return n;
1793 }
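/* Illustrative usage (a sketch; ids 0 and 3 are arbitrary indices into
 * dpaa2_xstats_strings chosen for the example):
 *
 *   uint64_t ids[2] = {0, 3};
 *   uint64_t vals[2];
 *   rte_eth_xstats_get_by_id(port_id, ids, vals, 2);
 *
 * Passing ids == NULL, as handled above, fills all stat_cnt counters.
 */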
1794
1795 static int
1796 dpaa2_xstats_get_names_by_id(
1797         struct rte_eth_dev *dev,
1798         struct rte_eth_xstat_name *xstats_names,
1799         const uint64_t *ids,
1800         unsigned int limit)
1801 {
1802         unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
1803         struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
1804
1805         if (!ids)
1806                 return dpaa2_xstats_get_names(dev, xstats_names, limit);
1807
1808         dpaa2_xstats_get_names(dev, xstats_names_copy, limit);
1809
1810         for (i = 0; i < limit; i++) {
1811                 if (ids[i] >= stat_cnt) {
1812                         DPAA2_PMD_ERR("xstats id value isn't valid");
1813                         return -1;
1814                 }
1815                 strlcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name, sizeof(xstats_names[i].name));
1816         }
1817         return limit;
1818 }
1819
1820 static int
1821 dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
1822 {
1823         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1824         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1825         int retcode;
1826         int i;
1827         struct dpaa2_queue *dpaa2_q;
1828
1829         PMD_INIT_FUNC_TRACE();
1830
1831         if (dpni == NULL) {
1832                 DPAA2_PMD_ERR("dpni is NULL");
1833                 return -EINVAL;
1834         }
1835
1836         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
1837         if (retcode)
1838                 goto error;
1839
1840         /* Reset the per queue stats in dpaa2_queue structure */
1841         for (i = 0; i < priv->nb_rx_queues; i++) {
1842                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
1843                 if (dpaa2_q)
1844                         dpaa2_q->rx_pkts = 0;
1845         }
1846
1847         for (i = 0; i < priv->nb_tx_queues; i++) {
1848                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
1849                 if (dpaa2_q)
1850                         dpaa2_q->tx_pkts = 0;
1851         }
1852
1853         return 0;
1854
1855 error:
1856         DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
1857         return retcode;
1858 }
1859
1860 /* return 0 means link status changed, -1 means not changed */
1861 static int
1862 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1863                       int wait_to_complete)
1864 {
1865         int ret;
1866         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1867         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
1868         struct rte_eth_link link;
1869         struct dpni_link_state state = {0};
1870         uint8_t count;
1871
1872         if (dpni == NULL) {
1873                 DPAA2_PMD_ERR("dpni is NULL");
1874                 return 0;
1875         }
1876
1877         for (count = 0; count <= MAX_REPEAT_TIME; count++) {
1878                 ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token,
1879                                           &state);
1880                 if (ret < 0) {
1881                         DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1882                         return -1;
1883                 }
1884                 if (state.up == ETH_LINK_DOWN &&
1885                     wait_to_complete)
1886                         rte_delay_ms(CHECK_INTERVAL);
1887                 else
1888                         break;
1889         }
1890
1891         memset(&link, 0, sizeof(struct rte_eth_link));
1892         link.link_status = state.up;
1893         link.link_speed = state.rate;
1894
1895         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1896                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1897         else
1898                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1899
1900         ret = rte_eth_linkstatus_set(dev, &link);
1901         if (ret == -1)
1902                 DPAA2_PMD_DEBUG("No change in status");
1903         else
1904                 DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id,
1905                                link.link_status ? "Up" : "Down");
1906
1907         return ret;
1908 }
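/* Note: when wait_to_complete is non-zero and the link is down, the loop
 * above re-reads the DPNI link state every CHECK_INTERVAL ms, for at most
 * MAX_REPEAT_TIME + 1 reads, before reporting the last observed state.
 */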
1909
1910 /**
1911  * Toggle the DPNI to enable, if not already enabled.
1912  * This is not strictly PHY up/down - it is more of logical toggling.
1913  */
1914 static int
1915 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1916 {
1917         int ret = -EINVAL;
1918         struct dpaa2_dev_priv *priv;
1919         struct fsl_mc_io *dpni;
1920         int en = 0;
1921         struct dpni_link_state state = {0};
1922
1923         priv = dev->data->dev_private;
1924         dpni = (struct fsl_mc_io *)dev->process_private;
1925
1926         if (dpni == NULL) {
1927                 DPAA2_PMD_ERR("dpni is NULL");
1928                 return ret;
1929         }
1930
1931         /* Check if DPNI is currently enabled */
1932         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1933         if (ret) {
1934                 /* Unable to obtain dpni status; Not continuing */
1935                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1936                 return -EINVAL;
1937         }
1938
1939         /* Enable link if not already enabled */
1940         if (!en) {
1941                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1942                 if (ret) {
1943                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1944                         return -EINVAL;
1945                 }
1946         }
1947         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1948         if (ret < 0) {
1949                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1950                 return -1;
1951         }
1952
1953         /* changing tx burst function to start enqueues */
1954         dev->tx_pkt_burst = dpaa2_dev_tx;
1955         dev->data->dev_link.link_status = state.up;
1956         dev->data->dev_link.link_speed = state.rate;
1957
1958         if (state.up)
1959                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1960         else
1961                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1962         return ret;
1963 }
1964
1965 /**
1966  * Toggle the DPNI to disable, if not already disabled.
1967  * This is not strictly PHY up/down - it is more of logical toggling.
1968  */
1969 static int
1970 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1971 {
1972         int ret = -EINVAL;
1973         struct dpaa2_dev_priv *priv;
1974         struct fsl_mc_io *dpni;
1975         int dpni_enabled = 0;
1976         int retries = 10;
1977
1978         PMD_INIT_FUNC_TRACE();
1979
1980         priv = dev->data->dev_private;
1981         dpni = (struct fsl_mc_io *)dev->process_private;
1982
1983         if (dpni == NULL) {
1984                 DPAA2_PMD_ERR("Device has not yet been configured");
1985                 return ret;
1986         }
1987
1988         /* Changing tx burst function to avoid any more enqueues */
1989         dev->tx_pkt_burst = dummy_dev_tx;
1990
1991         /* Loop while dpni_disable() attempts to drain the egress FQs
1992          * and confirm them back to us.
1993          */
1994         do {
1995                 ret = dpni_disable(dpni, 0, priv->token);
1996                 if (ret) {
1997                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1998                         return ret;
1999                 }
2000                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
2001                 if (ret) {
2002                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
2003                         return ret;
2004                 }
2005                 if (dpni_enabled)
2006                         /* Allow the MC some slack */
2007                         rte_delay_us(100 * 1000);
2008         } while (dpni_enabled && --retries);
2009
2010         if (!retries) {
2011                 DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
2012                 /* TODO: we may have to manually clean up queues.
2013                  */
2014         } else {
2015                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
2016                                dev->data->port_id);
2017         }
2018
2019         dev->data->dev_link.link_status = 0;
2020
2021         return ret;
2022 }
2023
2024 static int
2025 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2026 {
2027         int ret = -EINVAL;
2028         struct dpaa2_dev_priv *priv;
2029         struct fsl_mc_io *dpni;
2030         struct dpni_link_state state = {0};
2031
2032         PMD_INIT_FUNC_TRACE();
2033
2034         priv = dev->data->dev_private;
2035         dpni = (struct fsl_mc_io *)dev->process_private;
2036
2037         if (dpni == NULL || fc_conf == NULL) {
2038                 DPAA2_PMD_ERR("device not configured");
2039                 return ret;
2040         }
2041
2042         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2043         if (ret) {
2044                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
2045                 return ret;
2046         }
2047
2048         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
2049         if (state.options & DPNI_LINK_OPT_PAUSE) {
2050                 /* DPNI_LINK_OPT_PAUSE set
2051                  *  if ASYM_PAUSE not set,
2052                  *      RX Side flow control (handle received Pause frame)
2053                  *      TX side flow control (send Pause frame)
2054                  *  if ASYM_PAUSE set,
2055                  *      RX Side flow control (handle received Pause frame)
2056                  *      No TX side flow control (send Pause frame disabled)
2057                  */
2058                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
2059                         fc_conf->mode = RTE_FC_FULL;
2060                 else
2061                         fc_conf->mode = RTE_FC_RX_PAUSE;
2062         } else {
2063                 /* DPNI_LINK_OPT_PAUSE not set
2064                  *  if ASYM_PAUSE set,
2065                  *      TX side flow control (send Pause frame)
2066                  *      No RX side flow control (No action on pause frame rx)
2067                  *  if ASYM_PAUSE not set,
2068                  *      Flow control disabled
2069                  */
2070                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
2071                         fc_conf->mode = RTE_FC_TX_PAUSE;
2072                 else
2073                         fc_conf->mode = RTE_FC_NONE;
2074         }
2075
2076         return ret;
2077 }
2078
2079 static int
2080 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2081 {
2082         int ret = -EINVAL;
2083         struct dpaa2_dev_priv *priv;
2084         struct fsl_mc_io *dpni;
2085         struct dpni_link_state state = {0};
2086         struct dpni_link_cfg cfg = {0};
2087
2088         PMD_INIT_FUNC_TRACE();
2089
2090         priv = dev->data->dev_private;
2091         dpni = (struct fsl_mc_io *)dev->process_private;
2092
2093         if (dpni == NULL) {
2094                 DPAA2_PMD_ERR("dpni is NULL");
2095                 return ret;
2096         }
2097
2098         /* It is necessary to obtain the current state before setting fc_conf
2099          * as the MC would return an error if the rate, autoneg or duplex
2100          * values differ.
2101          */
2102         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
2103         if (ret) {
2104                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
2105                 return -1;
2106         }
2107
2108         /* Disable link before setting configuration */
2109         dpaa2_dev_set_link_down(dev);
2110
2111         /* Based on fc_conf, update cfg */
2112         cfg.rate = state.rate;
2113         cfg.options = state.options;
2114
2115         /* update cfg with fc_conf */
2116         switch (fc_conf->mode) {
2117         case RTE_FC_FULL:
2118                 /* Full flow control;
2119                  * OPT_PAUSE set, ASYM_PAUSE not set
2120                  */
2121                 cfg.options |= DPNI_LINK_OPT_PAUSE;
2122                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2123                 break;
2124         case RTE_FC_TX_PAUSE:
2125                 /* Enable RX flow control
2126                  * OPT_PAUSE not set;
2127                  * ASYM_PAUSE set;
2128                  */
2129                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2130                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2131                 break;
2132         case RTE_FC_RX_PAUSE:
2133                 /* Enable TX Flow control
2134                  * OPT_PAUSE set
2135                  * ASYM_PAUSE set
2136                  */
2137                 cfg.options |= DPNI_LINK_OPT_PAUSE;
2138                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
2139                 break;
2140         case RTE_FC_NONE:
2141                 /* Disable Flow control
2142                  * OPT_PAUSE not set
2143                  * ASYM_PAUSE not set
2144                  */
2145                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
2146                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2147                 break;
2148         default:
2149                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
2150                               fc_conf->mode);
2151                 return -1;
2152         }
2153
2154         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
2155         if (ret)
2156                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
2157                               ret);
2158
2159         /* Enable link */
2160         dpaa2_dev_set_link_up(dev);
2161
2162         return ret;
2163 }
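/* Summary of the PAUSE/ASYM_PAUSE encoding used by the two routines above,
 * derived directly from the switch cases:
 *
 *   mode             OPT_PAUSE  OPT_ASYM_PAUSE
 *   RTE_FC_FULL          1            0
 *   RTE_FC_TX_PAUSE      0            1
 *   RTE_FC_RX_PAUSE      1            1
 *   RTE_FC_NONE          0            0
 */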
2164
2165 static int
2166 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
2167                           struct rte_eth_rss_conf *rss_conf)
2168 {
2169         struct rte_eth_dev_data *data = dev->data;
2170         struct dpaa2_dev_priv *priv = data->dev_private;
2171         struct rte_eth_conf *eth_conf = &data->dev_conf;
2172         int ret, tc_index;
2173
2174         PMD_INIT_FUNC_TRACE();
2175
2176         if (rss_conf->rss_hf) {
2177                 for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2178                         ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf,
2179                                 tc_index);
2180                         if (ret) {
2181                                 DPAA2_PMD_ERR("Unable to set flow dist on tc%d",
2182                                         tc_index);
2183                                 return ret;
2184                         }
2185                 }
2186         } else {
2187                 for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
2188                         ret = dpaa2_remove_flow_dist(dev, tc_index);
2189                         if (ret) {
2190                                 DPAA2_PMD_ERR(
2191                                         "Unable to remove flow dist on tc%d",
2192                                         tc_index);
2193                                 return ret;
2194                         }
2195                 }
2196         }
2197         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
2198         return 0;
2199 }
2200
2201 static int
2202 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2203                             struct rte_eth_rss_conf *rss_conf)
2204 {
2205         struct rte_eth_dev_data *data = dev->data;
2206         struct rte_eth_conf *eth_conf = &data->dev_conf;
2207
2208         /* dpaa2 does not support rss_key, so length should be 0 */
2209         rss_conf->rss_key_len = 0;
2210         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
2211         return 0;
2212 }
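/* Illustrative application-side usage (a sketch; the rss_hf bits shown are
 * only an example selection):
 *
 *   struct rte_eth_rss_conf conf = {
 *           .rss_key = NULL,  // dpaa2 has no programmable RSS key
 *           .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
 *   };
 *   rte_eth_dev_rss_hash_update(port_id, &conf);
 */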
2213
2214 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
2215                 int eth_rx_queue_id,
2216                 struct dpaa2_dpcon_dev *dpcon,
2217                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2218 {
2219         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2220         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2221         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2222         uint8_t flow_id = dpaa2_ethq->flow_id;
2223         struct dpni_queue cfg;
2224         uint8_t options, priority;
2225         int ret;
2226
2227         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
2228                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
2229         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
2230                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
2231         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
2232                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
2233         else
2234                 return -EINVAL;
2235
2236         priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
2237                    (dpcon->num_priorities - 1);
2238
2239         memset(&cfg, 0, sizeof(struct dpni_queue));
2240         options = DPNI_QUEUE_OPT_DEST;
2241         cfg.destination.type = DPNI_DEST_DPCON;
2242         cfg.destination.id = dpcon->dpcon_id;
2243         cfg.destination.priority = priority;
2244
2245         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
2246                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
2247                 cfg.destination.hold_active = 1;
2248         }
2249
2250         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
2251                         !eth_priv->en_ordered) {
2252                 struct opr_cfg ocfg;
2253
2254                 /* Restoration window size = 256 frames */
2255                 ocfg.oprrws = 3;
2256                 /* Restoration window size = 512 frames for LX2 */
2257                 if (dpaa2_svr_family == SVR_LX2160A)
2258                         ocfg.oprrws = 4;
2259                 /* Auto advance NESN window enabled */
2260                 ocfg.oa = 1;
2261                 /* Late arrival window size disabled */
2262                 ocfg.olws = 0;
2263                 /* ORL resource exhaustion advance NESN disabled */
2264                 ocfg.oeane = 0;
2265                 /* Loose ordering enabled */
2266                 ocfg.oloe = 1;
2267                 eth_priv->en_loose_ordered = 1;
2268                 /* Strict ordering enabled if explicitly set */
2269                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
2270                         ocfg.oloe = 0;
2271                         eth_priv->en_loose_ordered = 0;
2272                 }
2273
2274                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
2275                                    dpaa2_ethq->tc_index, flow_id,
2276                                    OPR_OPT_CREATE, &ocfg);
2277                 if (ret) {
2278                         DPAA2_PMD_ERR("Error setting opr: ret: %d", ret);
2279                         return ret;
2280                 }
2281
2282                 eth_priv->en_ordered = 1;
2283         }
2284
2285         options |= DPNI_QUEUE_OPT_USER_CTX;
2286         cfg.user_context = (size_t)(dpaa2_ethq);
2287
2288         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2289                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
2290         if (ret) {
2291                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2292                 return ret;
2293         }
2294
2295         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
2296
2297         return 0;
2298 }
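/* Note: ordered event queues default to loose ordering (ocfg.oloe = 1);
 * strict ordering is opt-in through the environment, as checked with
 * getenv() above:
 *
 *   export DPAA2_STRICT_ORDERING_ENABLE=1
 */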
2299
2300 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
2301                 int eth_rx_queue_id)
2302 {
2303         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
2304         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2305         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
2306         uint8_t flow_id = dpaa2_ethq->flow_id;
2307         struct dpni_queue cfg;
2308         uint8_t options;
2309         int ret;
2310
2311         memset(&cfg, 0, sizeof(struct dpni_queue));
2312         options = DPNI_QUEUE_OPT_DEST;
2313         cfg.destination.type = DPNI_DEST_NONE;
2314
2315         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
2316                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
2317         if (ret)
2318                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
2319
2320         return ret;
2321 }
2322
2323 static int
2324 dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
2325                        const struct rte_flow_ops **ops)
2326 {
2327         if (!dev)
2328                 return -ENODEV;
2329
2330         *ops = &dpaa2_flow_ops;
2331         return 0;
2332 }
2333
2334 static void
2335 dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2336         struct rte_eth_rxq_info *qinfo)
2337 {
2338         struct dpaa2_queue *rxq;
2339         struct dpaa2_dev_priv *priv = dev->data->dev_private;
2340         struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
2341         uint16_t max_frame_length;
2342
2343         rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
2344
2345         qinfo->mp = rxq->mb_pool;
2346         qinfo->scattered_rx = dev->data->scattered_rx;
2347         qinfo->nb_desc = rxq->nb_desc;
2348         if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
2349                                 &max_frame_length) == 0)
2350                 qinfo->rx_buf_size = max_frame_length;
2351
2352         qinfo->conf.rx_free_thresh = 1;
2353         qinfo->conf.rx_drop_en = 1;
2354         qinfo->conf.rx_deferred_start = 0;
2355         qinfo->conf.offloads = rxq->offloads;
2356 }
2357
2358 static void
2359 dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2360         struct rte_eth_txq_info *qinfo)
2361 {
2362         struct dpaa2_queue *txq;
2363
2364         txq = dev->data->tx_queues[queue_id];
2365
2366         qinfo->nb_desc = txq->nb_desc;
2367         qinfo->conf.tx_thresh.pthresh = 0;
2368         qinfo->conf.tx_thresh.hthresh = 0;
2369         qinfo->conf.tx_thresh.wthresh = 0;
2370
2371         qinfo->conf.tx_free_thresh = 0;
2372         qinfo->conf.tx_rs_thresh = 0;
2373         qinfo->conf.offloads = txq->offloads;
2374         qinfo->conf.tx_deferred_start = 0;
2375 }
2376
2377 static int
2378 dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2379 {
2380         *(const void **)ops = &dpaa2_tm_ops;
2381
2382         return 0;
2383 }
2384
2385 void
2386 rte_pmd_dpaa2_thread_init(void)
2387 {
2388         int ret;
2389
2390         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
2391                 ret = dpaa2_affine_qbman_swp();
2392                 if (ret) {
2393                         DPAA2_PMD_ERR(
2394                                 "Failed to allocate IO portal, tid: %d",
2395                                 rte_gettid());
2396                         return;
2397                 }
2398         }
2399 }
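/* Illustrative usage: a thread without an affined QBMAN portal (e.g. a
 * non-EAL thread) should call this once before invoking the Rx/Tx burst
 * routines; the call is a no-op when DPAA2_PER_LCORE_DPIO is already set:
 *
 *   rte_pmd_dpaa2_thread_init();
 *   // rte_eth_rx_burst()/rte_eth_tx_burst() may now be called safely
 */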
2400
2401 static struct eth_dev_ops dpaa2_ethdev_ops = {
2402         .dev_configure    = dpaa2_eth_dev_configure,
2403         .dev_start            = dpaa2_dev_start,
2404         .dev_stop             = dpaa2_dev_stop,
2405         .dev_close            = dpaa2_dev_close,
2406         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
2407         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
2408         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
2409         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
2410         .dev_set_link_up      = dpaa2_dev_set_link_up,
2411         .dev_set_link_down    = dpaa2_dev_set_link_down,
2412         .link_update       = dpaa2_dev_link_update,
2413         .stats_get             = dpaa2_dev_stats_get,
2414         .xstats_get            = dpaa2_dev_xstats_get,
2415         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
2416         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
2417         .xstats_get_names      = dpaa2_xstats_get_names,
2418         .stats_reset       = dpaa2_dev_stats_reset,
2419         .xstats_reset         = dpaa2_dev_stats_reset,
2420         .fw_version_get    = dpaa2_fw_version_get,
2421         .dev_infos_get     = dpaa2_dev_info_get,
2422         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
2423         .mtu_set           = dpaa2_dev_mtu_set,
2424         .vlan_filter_set      = dpaa2_vlan_filter_set,
2425         .vlan_offload_set     = dpaa2_vlan_offload_set,
2426         .vlan_tpid_set        = dpaa2_vlan_tpid_set,
2427         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
2428         .rx_queue_release  = dpaa2_dev_rx_queue_release,
2429         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
2430         .tx_queue_release  = dpaa2_dev_tx_queue_release,
2431         .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
2432         .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
2433         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
2434         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
2435         .mac_addr_add         = dpaa2_dev_add_mac_addr,
2436         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
2437         .mac_addr_set         = dpaa2_dev_set_mac_addr,
2438         .rss_hash_update      = dpaa2_dev_rss_hash_update,
2439         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
2440         .flow_ops_get         = dpaa2_dev_flow_ops_get,
2441         .rxq_info_get         = dpaa2_rxq_info_get,
2442         .txq_info_get         = dpaa2_txq_info_get,
2443         .tm_ops_get           = dpaa2_tm_ops_get,
2444 #if defined(RTE_LIBRTE_IEEE1588)
2445         .timesync_enable      = dpaa2_timesync_enable,
2446         .timesync_disable     = dpaa2_timesync_disable,
2447         .timesync_read_time   = dpaa2_timesync_read_time,
2448         .timesync_write_time  = dpaa2_timesync_write_time,
2449         .timesync_adjust_time = dpaa2_timesync_adjust_time,
2450         .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
2451         .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
2452 #endif
2453 };
2454
2455 /* Populate the MAC address from the physical device (u-boot/firmware)
2456  * and/or the one set by higher layers such as MC (restool).
2457  * The selected (prime) MAC address is written into mac_entry.
2458  */
2459 static int
2460 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
2461                   struct rte_ether_addr *mac_entry)
2462 {
2463         int ret;
2464         struct rte_ether_addr phy_mac, prime_mac;
2465
2466         memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
2467         memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
2468
2469         /* Get the physical device MAC address */
2470         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2471                                      phy_mac.addr_bytes);
2472         if (ret) {
2473                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
2474                 goto cleanup;
2475         }
2476
2477         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
2478                                         prime_mac.addr_bytes);
2479         if (ret) {
2480                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
2481                 goto cleanup;
2482         }
2483
2484         /* Now that both MACs have been obtained, do:
2485          *  if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy
2486          *     and return phy
2487          *  If empty_mac(phy), return prime.
2488          *  if both are empty, create random MAC, set as prime and return
2489          */
2490         if (!rte_is_zero_ether_addr(&phy_mac)) {
2491                 /* If the addresses are not same, overwrite prime */
2492                 if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
2493                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2494                                                         priv->token,
2495                                                         phy_mac.addr_bytes);
2496                         if (ret) {
2497                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
2498                                               ret);
2499                                 goto cleanup;
2500                         }
2501                         memcpy(&prime_mac, &phy_mac,
2502                                 sizeof(struct rte_ether_addr));
2503                 }
2504         } else if (rte_is_zero_ether_addr(&prime_mac)) {
2505                 /* In case phys and prime, both are zero, create random MAC */
2506                 rte_eth_random_addr(prime_mac.addr_bytes);
2507                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
2508                                                 priv->token,
2509                                                 prime_mac.addr_bytes);
2510                 if (ret) {
2511                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
2512                         goto cleanup;
2513                 }
2514         }
2515
2516         /* prime_mac is the final MAC address */
2517         memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
2518         return 0;
2519
2520 cleanup:
2521         return -1;
2522 }
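/* The selection logic above, in table form:
 *
 *   phy MAC     prime MAC        result
 *   non-zero    different        phy written as primary and returned
 *   non-zero    same as phy      prime (== phy) returned
 *   zero        non-zero         prime returned
 *   zero        zero             random MAC generated, set and returned
 */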
2523
2524 static int
2525 check_devargs_handler(__rte_unused const char *key, const char *value,
2526                       __rte_unused void *opaque)
2527 {
2528         if (strcmp(value, "1"))
2529                 return -1;
2530
2531         return 0;
2532 }
2533
2534 static int
2535 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2536 {
2537         struct rte_kvargs *kvlist;
2538
2539         if (!devargs)
2540                 return 0;
2541
2542         kvlist = rte_kvargs_parse(devargs->args, NULL);
2543         if (!kvlist)
2544                 return 0;
2545
2546         if (!rte_kvargs_count(kvlist, key)) {
2547                 rte_kvargs_free(kvlist);
2548                 return 0;
2549         }
2550
2551         if (rte_kvargs_process(kvlist, key,
2552                                check_devargs_handler, NULL) < 0) {
2553                 rte_kvargs_free(kvlist);
2554                 return 0;
2555         }
2556         rte_kvargs_free(kvlist);
2557
2558         return 1;
2559 }
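/* Illustrative devargs usage: each key is matched against a "key=1" entry
 * in the fslmc device argument string, and "1" is the only value accepted
 * by check_devargs_handler() above, e.g.:
 *
 *   fslmc:dpni.1,drv_loopback=1
 *   fslmc:dpni.1,drv_err_queue=1
 */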
2560
2561 static int
2562 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2563 {
2564         struct rte_device *dev = eth_dev->device;
2565         struct rte_dpaa2_device *dpaa2_dev;
2566         struct fsl_mc_io *dpni_dev;
2567         struct dpni_attr attr;
2568         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2569         struct dpni_buffer_layout layout;
2570         int ret, hw_id, i;
2571
2572         PMD_INIT_FUNC_TRACE();
2573
2574         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2575         if (!dpni_dev) {
2576                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2577                 return -1;
2578         }
2579         dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
2580         eth_dev->process_private = (void *)dpni_dev;
2581
2582         /* For secondary processes, the primary has done all the work */
2583         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2584                 /* In case of secondary, only burst and ops API need to be
2585                  * plugged.
2586                  */
2587                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2588                 eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
2589                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2590                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2591                 else if (dpaa2_get_devargs(dev->devargs,
2592                                         DRIVER_NO_PREFETCH_MODE))
2593                         eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2594                 else
2595                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2596                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2597                 return 0;
2598         }
2599
2600         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2601
2602         hw_id = dpaa2_dev->object_id;
2603         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2604         if (ret) {
2605                 DPAA2_PMD_ERR(
2606                              "Failure in opening dpni@%d with err code %d",
2607                              hw_id, ret);
2608                 rte_free(dpni_dev);
2609                 return -1;
2610         }
2611
2612         /* Clean the device first */
2613         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2614         if (ret) {
2615                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2616                               hw_id, ret);
2617                 goto init_err;
2618         }
2619
2620         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2621         if (ret) {
2622                 DPAA2_PMD_ERR(
2623                              "Failure in get dpni@%d attribute, err code %d",
2624                              hw_id, ret);
2625                 goto init_err;
2626         }
2627
2628         priv->num_rx_tc = attr.num_rx_tcs;
2629         priv->qos_entries = attr.qos_entries;
2630         priv->fs_entries = attr.fs_entries;
2631         priv->dist_queues = attr.num_queues;
2632
2633         /* only if the custom CG is enabled */
2634         if (attr.options & DPNI_OPT_CUSTOM_CG)
2635                 priv->max_cgs = attr.num_cgs;
2636         else
2637                 priv->max_cgs = 0;
2638
2639         for (i = 0; i < priv->max_cgs; i++)
2640                 priv->cgid_in_use[i] = 0;
2641
2642         for (i = 0; i < attr.num_rx_tcs; i++)
2643                 priv->nb_rx_queues += attr.num_queues;
2644
2645         /* Using number of TX queues as number of TX TCs */
2646         priv->nb_tx_queues = attr.num_tx_tcs;
2647
2648         DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
2649                         priv->num_rx_tc, priv->nb_rx_queues,
2650                         priv->nb_tx_queues, priv->max_cgs);
2651
2652         priv->hw = dpni_dev;
2653         priv->hw_id = hw_id;
2654         priv->options = attr.options;
2655         priv->max_mac_filters = attr.mac_filter_entries;
2656         priv->max_vlan_filters = attr.vlan_filter_entries;
2657         priv->flags = 0;
2658 #if defined(RTE_LIBRTE_IEEE1588)
2659         printf("DPDK IEEE1588 is enabled\n");
2660         priv->flags |= DPAA2_TX_CONF_ENABLE;
2661 #endif
2662         /* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
2663         if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
2664                 priv->flags |= DPAA2_TX_CONF_ENABLE;
2665                 DPAA2_PMD_INFO("TX_CONF Enabled");
2666         }
2667
2668         if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
2669                 dpaa2_enable_err_queue = 1;
2670                 DPAA2_PMD_INFO("Enable error queue");
2671         }
2672
2673         /* Allocate memory for hardware structure for queues */
2674         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2675         if (ret) {
2676                 DPAA2_PMD_ERR("Queue allocation Failed");
2677                 goto init_err;
2678         }
2679
2680         /* Allocate memory for storing MAC addresses.
2681          * Table of mac_filter_entries size is allocated so that RTE ether lib
2682          * can add MAC entries when rte_eth_dev_mac_addr_add is called.
2683          */
2684         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2685                 RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2686         if (eth_dev->data->mac_addrs == NULL) {
2687                 DPAA2_PMD_ERR(
2688                    "Failed to allocate %d bytes needed to store MAC addresses",
2689                    RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
2690                 ret = -ENOMEM;
2691                 goto init_err;
2692         }
2693
2694         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2695         if (ret) {
2696                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2697                 rte_free(eth_dev->data->mac_addrs);
2698                 eth_dev->data->mac_addrs = NULL;
2699                 goto init_err;
2700         }
2701
2702         /* ... tx buffer layout ... */
2703         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2704         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2705                 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2706                                  DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2707                 layout.pass_timestamp = true;
2708         } else {
2709                 layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2710         }
2711         layout.pass_frame_status = 1;
2712         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2713                                      DPNI_QUEUE_TX, &layout);
2714         if (ret) {
2715                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2716                 goto init_err;
2717         }
2718
2719         /* ... tx-conf and error buffer layout ... */
2720         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2721         if (priv->flags & DPAA2_TX_CONF_ENABLE) {
2722                 layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2723                 layout.pass_timestamp = true;
2724         }
2725         layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2726         layout.pass_frame_status = 1;
2727         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2728                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2729         if (ret) {
2730                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2731                              ret);
2732                 goto init_err;
2733         }
2734
2735         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2736
2737         if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2738                 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2739                 DPAA2_PMD_INFO("Loopback mode");
2740         } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
2741                 eth_dev->rx_pkt_burst = dpaa2_dev_rx;
2742                 DPAA2_PMD_INFO("No Prefetch mode");
2743         } else {
2744                 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2745         }
2746         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2747
2748         /* Init fields w.r.t. classification */
2749         memset(&priv->extract.qos_key_extract, 0,
2750                 sizeof(struct dpaa2_key_extract));
2751         priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
2752         if (!priv->extract.qos_extract_param) {
2753                 DPAA2_PMD_ERR("Memory alloc failed for flow classification");
2754                 ret = -ENOMEM;
2755                 goto init_err;
2756         }
2757         priv->extract.qos_key_extract.key_info.ipv4_src_offset =
2758                 IP_ADDRESS_OFFSET_INVALID;
2759         priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
2760                 IP_ADDRESS_OFFSET_INVALID;
2761         priv->extract.qos_key_extract.key_info.ipv6_src_offset =
2762                 IP_ADDRESS_OFFSET_INVALID;
2763         priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
2764                 IP_ADDRESS_OFFSET_INVALID;
2765
2766         for (i = 0; i < MAX_TCS; i++) {
2767                 memset(&priv->extract.tc_key_extract[i], 0,
2768                         sizeof(struct dpaa2_key_extract));
2769                 priv->extract.tc_extract_param[i] =
2770                         (size_t)rte_malloc(NULL, 256, 64);
2771                 if (!priv->extract.tc_extract_param[i]) {
2772                         DPAA2_PMD_ERR("Memory allocation failed for flow classification");
2773                         ret = -ENOMEM;
2774                         goto init_err;
2775                 }
2776                 priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
2777                         IP_ADDRESS_OFFSET_INVALID;
2778                 priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
2779                         IP_ADDRESS_OFFSET_INVALID;
2780                 priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
2781                         IP_ADDRESS_OFFSET_INVALID;
2782                 priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
2783                         IP_ADDRESS_OFFSET_INVALID;
2784         }
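        /* Each extract_param buffer (256 bytes, 64-byte aligned) is
         * intended to be packed later with the key-extraction descriptor
         * that rte_flow setup hands to the MC firmware; the offsets above
         * start out invalid and are filled in once extraction rules are
         * built.
         */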
2785
2786         ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
2787                                         RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
2788                                         + VLAN_TAG_SIZE);
2789         if (ret) {
2790                 DPAA2_PMD_ERR("Unable to set max frame length; check config");
2791                 goto init_err;
2792         }
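        /* Worked example with the standard definitions: RTE_ETHER_MAX_LEN
         * (1518) - RTE_ETHER_CRC_LEN (4) + VLAN_TAG_SIZE (4) = 1518 bytes,
         * i.e. the default 1500-byte MTU plus the 14-byte Ethernet header
         * and one 4-byte VLAN tag, with the CRC budget reused for the tag.
         */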
2793
2794         /* TODO: to enable soft parser support, the DPAA2 driver needs to
2795          * integrate with an external entity that supplies the byte code for
2796          * the software sequence, which is then offloaded to the H/W via the
2797          * MC interface. For now it is assumed that the driver already has
2798          * the byte code, and that code is offloaded to the H/W.
2799          */
2800         if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
2801                 WRIOP_SS_INITIALIZER(priv);
2802                 ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
2803                 if (ret < 0) {
2804                         DPAA2_PMD_ERR("Error(%d) in loading soft parser",
2805                                       ret);
2806                         return ret;
2807                 }
2808
2809                 ret = dpaa2_eth_enable_wriop_soft_parser(priv,
2810                                                          DPNI_SS_INGRESS);
2811                 if (ret < 0) {
2812                         DPAA2_PMD_ERR("Error(%d) in enabling soft parser",
2813                                       ret);
2814                         return ret;
2815                 }
2816         }
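        /* Usage sketch: the soft parser is opt-in via the environment,
         * e.g. `export DPAA2_ENABLE_SOFT_PARSER=1` before launching the
         * application; the byte code itself comes from the tables pulled
         * in through dpaa2_sparser.h via WRIOP_SS_INITIALIZER().
         */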
2817         DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name);
2818         return 0;
2819 init_err:
2820         dpaa2_dev_close(eth_dev);
2821
2822         return ret;
2823 }
2824
2825 static int
2826 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2827                 struct rte_dpaa2_device *dpaa2_dev)
2828 {
2829         struct rte_eth_dev *eth_dev;
2830         struct dpaa2_dev_priv *dev_priv;
2831         int diag;
2832
2833         if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
2834                 RTE_PKTMBUF_HEADROOM) {
2835                 DPAA2_PMD_ERR(
2836                 "RTE_PKTMBUF_HEADROOM(%d) shall be >= DPAA2 Annotation req(%d)",
2837                 RTE_PKTMBUF_HEADROOM,
2838                 DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
2839
2840                 return -1;
2841         }
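        /* The Rx path places the hardware annotation and the pass-through
         * annotation (PTA) area in the mbuf headroom ahead of the packet
         * data, hence the hard requirement on RTE_PKTMBUF_HEADROOM above.
         */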
2842
2843         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2844                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2845                 if (!eth_dev)
2846                         return -ENODEV;
2847                 dev_priv = rte_zmalloc("ethdev private structure",
2848                                        sizeof(struct dpaa2_dev_priv),
2849                                        RTE_CACHE_LINE_SIZE);
2850                 if (dev_priv == NULL) {
2851                         DPAA2_PMD_CRIT(
2852                                 "Unable to allocate memory for private data");
2853                         rte_eth_dev_release_port(eth_dev);
2854                         return -ENOMEM;
2855                 }
2856                 eth_dev->data->dev_private = (void *)dev_priv;
2857                 /* Store a pointer to eth_dev in dev_private */
2858                 dev_priv->eth_dev = eth_dev;
2859         } else {
2860                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2861                 if (!eth_dev) {
2862                         DPAA2_PMD_DEBUG("Secondary process attach failed, returning -ENODEV");
2863                         return -ENODEV;
2864                 }
2865         }
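        /* In a secondary process the port is only attached: the shared
         * ethdev data (including dev_private) set up by the primary is
         * reused rather than re-allocated.
         */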
2866
2867         eth_dev->device = &dpaa2_dev->device;
2868
2869         dpaa2_dev->eth_dev = eth_dev;
2870         eth_dev->data->rx_mbuf_alloc_failed = 0;
2871
2872         if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2873                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2874
2875         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2876
2877         /* Invoke PMD device initialization function */
2878         diag = dpaa2_dev_init(eth_dev);
2879         if (diag == 0) {
2880                 rte_eth_dev_probing_finish(eth_dev);
2881                 return 0;
2882         }
2883
2884         rte_eth_dev_release_port(eth_dev);
2885         return diag;
2886 }
2887
2888 static int
2889 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2890 {
2891         struct rte_eth_dev *eth_dev;
2892         int ret;
2893
2894         eth_dev = dpaa2_dev->eth_dev;
2895         dpaa2_dev_close(eth_dev);
2896         ret = rte_eth_dev_release_port(eth_dev);
2897
2898         return ret;
2899 }
2900
2901 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2902         .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2903         .drv_type = DPAA2_ETH,
2904         .probe = rte_dpaa2_probe,
2905         .remove = rte_dpaa2_remove,
2906 };
2907
2908 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2909 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2910                 DRIVER_LOOPBACK_MODE "=<int> "
2911                 DRIVER_NO_PREFETCH_MODE "=<int> "
2912                 DRIVER_TX_CONF "=<int> "
2913                 DRIVER_ERROR_QUEUE "=<int>");
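/* Example (hypothetical object name): run testpmd with loopback mode and
 * the error queue enabled on dpni.1:
 *
 *   dpdk-testpmd -a fslmc:dpni.1,drv_loopback=1,drv_err_queue=1 -- -i
 */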
2914 RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);