net/dpaa2: add missing device info fields
drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_CRC_STRIP |
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

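/* Add or remove the given VLAN ID in the DPNI VLAN filter table. */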
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("Unable to add/rem vlan %d (hwid = %d): err = %d",
			      vlan_id, priv->hw_id, ret);

	return ret;
}

static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

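/* Report the MC firmware version as "<svr>-<major>.<minor>.<revision>".
 * Returns 0 on success, or the number of bytes (including the
 * terminating '\0') that would be needed when the supplied buffer is
 * too small.
 */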
static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

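/* Allocate one contiguous array of dpaa2_queue structures for all Rx
 * and Tx queues, plus per-Rx-queue DQRR storage and per-Tx-queue CSCN
 * memory used for congestion notifications. On failure, everything
 * allocated so far is unwound in reverse order.
 */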
static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->dev = dev;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->dev = dev;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = DPAA2_DEF_TC;
		mcq->flow_id = dist_idx;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

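/* Validate the requested Rx/Tx offloads against the supported and
 * always-on sets, then program the maximum frame length, RSS
 * distribution and L3/L4 checksum offloads into the DPNI.
 */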
static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (~(dev_rx_offloads_sup | dev_rx_offloads_nodis) & rx_offloads) {
		DPAA2_PMD_ERR(
		"Rx offloads non supported - requested 0x%" PRIx64
		" supported 0x%" PRIx64,
			rx_offloads,
			dev_rx_offloads_sup | dev_rx_offloads_nodis);
		return -ENOTSUP;
	}
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (~(dev_tx_offloads_sup | dev_tx_offloads_nodis) & tx_offloads) {
		DPAA2_PMD_ERR(
		"Tx offloads non supported - requested 0x%" PRIx64
		" supported 0x%" PRIx64,
			tx_offloads,
			dev_tx_offloads_sup | dev_tx_offloads_nodis);
		return -ENOTSUP;
	}
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution. "
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum:Error = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum:Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if ls2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* 00 00 00 - last 6 bit represent annotation, context stashing,
		 * data stashing setting 01 01 00 (0x14)
		 * (in following order ->DS AS CS)
		 * to enable 1 line data, 1 line annotation.
		 * For LX2, this setting should be 01 00 00 (0x10)
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

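/* Set up a Tx flow. Each Tx queue is mapped to its own traffic class
 * (tc_id == tx_queue_id) with flow 0; Tx confirmation is disabled and,
 * unless DPAA2_TX_CGR_OFF is set, per-TC congestion notification is
 * armed to write congestion state into dpaa2_q->cscn.
 */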
static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

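/* Query the fill level (frame count) of the Rx FQ backing the given
 * queue through the QBMan portal; the calling lcore is given an affine
 * portal first if it does not already have one.
 */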
static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

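/* Packet types are only advertised for the prefetch Rx burst routine;
 * NULL is returned for any other Rx handler.
 */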
static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo - add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
		return ptypes;
	return NULL;
}

/**
 * DPAA2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

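/* Enable or disable the DPNI link-change (LSC) interrupt by programming
 * the IRQ mask and enable state for DPNI_IRQ_INDEX.
 */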
static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

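/* Enable the DPNI, bring the link up, cache the Tx QDID and per-queue
 * Rx FQIDs, configure error-frame handling, and register the LSC
 * interrupt handler if link-status interrupts were requested.
 */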
static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors, send them to normal path and set it in annotation */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

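/* Free the per-Tx-queue congestion state notification (CSCN) memory
 * and reset the DPNI to its initial state.
 */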
static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct rte_eth_link link;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < data->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
		if (dpaa2_q->cscn) {
			rte_free(dpaa2_q->cscn);
			dpaa2_q->cscn = NULL;
		}
	}

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(
		struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

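/* Program the maximum receive frame length as MTU plus Ethernet header,
 * CRC and one VLAN tag, and update the jumbo-frame offload flag to
 * match the resulting frame size.
 */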
static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

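/* Basic statistics are collected from the DPNI statistics pages:
 * page 0 holds ingress counters, page 1 egress counters and page 2
 * the filtered/discarded frame counters.
 */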
static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return retcode;
}

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	return;

error:
	DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
	return;
}

/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
			int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link;
	struct dpni_link_state state = {0};

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return 0;
	}

	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
		return -1;
	}

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		DPAA2_PMD_DEBUG("No change in status");
	else
		DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
			       link.link_status ? "Up" : "Down");

	return ret;
}

/**
 * Toggle the DPNI to enable, if not already enabled.
 * This is not strictly PHY up/down - it is more of logical toggling.
 */
static int
dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
{
	int ret = -EINVAL;
	struct dpaa2_dev_priv *priv;
	struct fsl_mc_io *dpni;
	int en = 0;
	struct dpni_link_state state = {0};

	priv = dev->data->dev_private;
	dpni = (struct fsl_mc_io *)priv->hw;

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return ret;
	}

	/* Check if DPNI is currently enabled */
	ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
	if (ret) {
		/* Unable to obtain dpni status; Not continuing */
		DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
		return -EINVAL;
	}

	/* Enable link if not already enabled */
	if (!en) {
		ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
		if (ret) {
			DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
			return -EINVAL;
		}
	}
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
		return -1;
	}

	/* changing tx burst function to start enqueues */
	dev->tx_pkt_burst = dpaa2_dev_tx;
	dev->data->dev_link.link_status = state.up;

	if (state.up)
		DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
	else
		DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
	return ret;
}

1456 /**
1457  * Toggle the DPNI to disable, if not already disabled.
1458  * This is not strictly PHY up/down - it is more of logical toggling.
1459  */
1460 static int
1461 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1462 {
1463         int ret = -EINVAL;
1464         struct dpaa2_dev_priv *priv;
1465         struct fsl_mc_io *dpni;
1466         int dpni_enabled = 0;
1467         int retries = 10;
1468
1469         PMD_INIT_FUNC_TRACE();
1470
1471         priv = dev->data->dev_private;
1472         dpni = (struct fsl_mc_io *)priv->hw;
1473
1474         if (dpni == NULL) {
1475                 DPAA2_PMD_ERR("Device has not yet been configured");
1476                 return ret;
1477         }
1478
1479         /*changing  tx burst function to avoid any more enqueues */
1480         dev->tx_pkt_burst = dummy_dev_tx;
1481
1482         /* Loop while dpni_disable() attempts to drain the egress FQs
1483          * and confirm them back to us.
1484          */
1485         do {
1486                 ret = dpni_disable(dpni, 0, priv->token);
1487                 if (ret) {
1488                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1489                         return ret;
1490                 }
1491                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1492                 if (ret) {
1493                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1494                         return ret;
1495                 }
1496                 if (dpni_enabled)
1497                         /* Allow the MC some slack */
1498                         rte_delay_us(100 * 1000);
1499         } while (dpni_enabled && --retries);
1500
1501         if (!retries) {
1502                 DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
1503                 /* todo- we may have to manually cleanup queues.
1504                  */
1505         } else {
1506                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1507                                dev->data->port_id);
1508         }
1509
1510         dev->data->dev_link.link_status = 0;
1511
1512         return ret;
1513 }
1514
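/* Report the current flow control mode by reading the DPNI link state and
 * mapping its PAUSE/ASYM_PAUSE options to the corresponding
 * rte_eth_fc_conf mode.
 */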
static int
dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        int ret = -EINVAL;
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        struct dpni_link_state state = {0};

        PMD_INIT_FUNC_TRACE();

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL || fc_conf == NULL) {
                DPAA2_PMD_ERR("Device not configured");
                return ret;
        }

        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret) {
                DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
                return ret;
        }

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        if (state.options & DPNI_LINK_OPT_PAUSE) {
                /* DPNI_LINK_OPT_PAUSE set
                 *  if ASYM_PAUSE not set,
                 *      Rx flow control (handle received pause frames)
                 *      Tx flow control (send pause frames)
                 *  if ASYM_PAUSE set,
                 *      Rx flow control (handle received pause frames)
                 *      no Tx flow control (sending pause frames disabled)
                 */
                if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
                        fc_conf->mode = RTE_FC_FULL;
                else
                        fc_conf->mode = RTE_FC_RX_PAUSE;
        } else {
                /* DPNI_LINK_OPT_PAUSE not set
                 *  if ASYM_PAUSE set,
                 *      Tx flow control (send pause frames)
                 *      no Rx flow control (no action on received pause frames)
                 *  if ASYM_PAUSE not set,
                 *      flow control disabled
                 */
                if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
                        fc_conf->mode = RTE_FC_TX_PAUSE;
                else
                        fc_conf->mode = RTE_FC_NONE;
        }

        return ret;
}

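/* Apply the requested flow control mode by translating the rte_eth_fc_conf
 * mode into DPNI link options. The link is taken down while the new
 * configuration is applied and brought back up afterwards.
 */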
static int
dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        int ret = -EINVAL;
        struct dpaa2_dev_priv *priv;
        struct fsl_mc_io *dpni;
        struct dpni_link_state state = {0};
        struct dpni_link_cfg cfg = {0};

        PMD_INIT_FUNC_TRACE();

        priv = dev->data->dev_private;
        dpni = (struct fsl_mc_io *)priv->hw;

        if (dpni == NULL) {
                DPAA2_PMD_ERR("dpni is NULL");
                return ret;
        }

        /* It is necessary to obtain the current link state before setting
         * fc_conf, as the MC would return an error if the rate, autoneg or
         * duplex values differ from the current configuration.
         */
        ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
        if (ret) {
                DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
                return -1;
        }

        /* Disable link before setting configuration */
        dpaa2_dev_set_link_down(dev);

        /* Start from the current link configuration */
        cfg.rate = state.rate;
        cfg.options = state.options;

        /* Update cfg according to fc_conf */
        switch (fc_conf->mode) {
        case RTE_FC_FULL:
                /* Full flow control:
                 * OPT_PAUSE set, ASYM_PAUSE not set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        case RTE_FC_TX_PAUSE:
                /* Enable Tx flow control only:
                 * OPT_PAUSE not set;
                 * ASYM_PAUSE set;
                 */
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                break;
        case RTE_FC_RX_PAUSE:
                /* Enable Rx flow control only:
                 * OPT_PAUSE set
                 * ASYM_PAUSE set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        case RTE_FC_NONE:
                /* Disable flow control:
                 * OPT_PAUSE not set
                 * ASYM_PAUSE not set
                 */
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
        default:
                DPAA2_PMD_ERR("Unsupported flow control mode (%d)",
                              fc_conf->mode);
                /* Re-enable the link before bailing out */
                dpaa2_dev_set_link_up(dev);
                return -1;
        }

        ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
        if (ret)
                DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
                              ret);

        /* Enable link */
        dpaa2_dev_set_link_up(dev);

        return ret;
}

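/* Enable or disable Rx flow distribution (RSS) according to rss_hf. A zero
 * rss_hf removes the distribution; the configured value is cached in the
 * device configuration so it can be reported back later.
 */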
static int
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (rss_conf->rss_hf) {
                ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
                if (ret) {
                        DPAA2_PMD_ERR("Unable to set flow dist");
                        return ret;
                }
        } else {
                ret = dpaa2_remove_flow_dist(dev, 0);
                if (ret) {
                        DPAA2_PMD_ERR("Unable to remove flow dist");
                        return ret;
                }
        }
        eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
        return 0;
}

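/* Report the cached RSS configuration. DPAA2 does not expose an RSS key,
 * so only the cached rss_hf value is returned.
 */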
static int
dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf)
{
        struct rte_eth_dev_data *data = dev->data;
        struct rte_eth_conf *eth_conf = &data->dev_conf;

        /* dpaa2 does not support rss_key, so the key length is 0 */
        rss_conf->rss_key_len = 0;
        rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
        return 0;
}

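/* Attach an Rx queue to a DPCON object so that received frames are
 * delivered to the event device (Rx adapter path). The scheduling type
 * selects the per-frame processing callback; atomic queues additionally
 * request the hold-active option.
 */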
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id,
                uint16_t dpcon_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
        uint8_t flow_id = dpaa2_ethq->flow_id;
        struct dpni_queue cfg;
        uint8_t options;
        int ret;

        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
                dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
        else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
                dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
        else
                return -EINVAL;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        options = DPNI_QUEUE_OPT_DEST;
        cfg.destination.type = DPNI_DEST_DPCON;
        cfg.destination.id = dpcon_id;
        cfg.destination.priority = queue_conf->ev.priority;

        if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
                options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
                cfg.destination.hold_active = 1;
        }

        options |= DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (size_t)(dpaa2_ethq);

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
        if (ret) {
                DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
                return ret;
        }

        memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));

        return 0;
}

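/* Detach an Rx queue from its DPCON object, reverting the queue
 * destination so frames are no longer delivered to the event device.
 */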
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id)
{
        struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
        struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
        uint8_t flow_id = dpaa2_ethq->flow_id;
        struct dpni_queue cfg;
        uint8_t options;
        int ret;

        memset(&cfg, 0, sizeof(struct dpni_queue));
        options = DPNI_QUEUE_OPT_DEST;
        cfg.destination.type = DPNI_DEST_NONE;

        ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
                             dpaa2_ethq->tc_index, flow_id, options, &cfg);
        if (ret)
                DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);

        return ret;
}

static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure            = dpaa2_eth_dev_configure,
        .dev_start                = dpaa2_dev_start,
        .dev_stop                 = dpaa2_dev_stop,
        .dev_close                = dpaa2_dev_close,
        .promiscuous_enable       = dpaa2_dev_promiscuous_enable,
        .promiscuous_disable      = dpaa2_dev_promiscuous_disable,
        .allmulticast_enable      = dpaa2_dev_allmulticast_enable,
        .allmulticast_disable     = dpaa2_dev_allmulticast_disable,
        .dev_set_link_up          = dpaa2_dev_set_link_up,
        .dev_set_link_down        = dpaa2_dev_set_link_down,
        .link_update              = dpaa2_dev_link_update,
        .stats_get                = dpaa2_dev_stats_get,
        .xstats_get               = dpaa2_dev_xstats_get,
        .xstats_get_by_id         = dpaa2_xstats_get_by_id,
        .xstats_get_names_by_id   = dpaa2_xstats_get_names_by_id,
        .xstats_get_names         = dpaa2_xstats_get_names,
        .stats_reset              = dpaa2_dev_stats_reset,
        .xstats_reset             = dpaa2_dev_stats_reset,
        .fw_version_get           = dpaa2_fw_version_get,
        .dev_infos_get            = dpaa2_dev_info_get,
        .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
        .mtu_set                  = dpaa2_dev_mtu_set,
        .vlan_filter_set          = dpaa2_vlan_filter_set,
        .vlan_offload_set         = dpaa2_vlan_offload_set,
        .rx_queue_setup           = dpaa2_dev_rx_queue_setup,
        .rx_queue_release         = dpaa2_dev_rx_queue_release,
        .tx_queue_setup           = dpaa2_dev_tx_queue_setup,
        .tx_queue_release         = dpaa2_dev_tx_queue_release,
        .rx_queue_count           = dpaa2_dev_rx_queue_count,
        .flow_ctrl_get            = dpaa2_flow_ctrl_get,
        .flow_ctrl_set            = dpaa2_flow_ctrl_set,
        .mac_addr_add             = dpaa2_dev_add_mac_addr,
        .mac_addr_remove          = dpaa2_dev_remove_mac_addr,
        .mac_addr_set             = dpaa2_dev_set_mac_addr,
        .rss_hash_update          = dpaa2_dev_rss_hash_update,
        .rss_hash_conf_get        = dpaa2_dev_rss_hash_conf_get,
};

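/* Initialize the DPNI device: open and reset it through the MC, read its
 * attributes, allocate Rx/Tx queues and MAC address storage, and set up
 * the Tx and Tx-confirmation buffer layouts before installing the ops
 * table and burst functions.
 */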
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_device *dev = eth_dev->device;
        struct rte_dpaa2_device *dpaa2_dev;
        struct fsl_mc_io *dpni_dev;
        struct dpni_attr attr;
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct dpni_buffer_layout layout;
        int ret, hw_id;

        PMD_INIT_FUNC_TRACE();

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

        hw_id = dpaa2_dev->object_id;

        dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
        if (!dpni_dev) {
                DPAA2_PMD_ERR("Memory allocation failed for dpni device");
                return -1;
        }

        dpni_dev->regs = rte_mcp_ptr_list[0];
        ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
        if (ret) {
                DPAA2_PMD_ERR(
                             "Failure in opening dpni@%d with err code %d",
                             hw_id, ret);
                rte_free(dpni_dev);
                return -1;
        }

        /* Clean the device first */
        ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
                              hw_id, ret);
                goto init_err;
        }

        ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
        if (ret) {
                DPAA2_PMD_ERR(
                             "Failure in get dpni@%d attribute, err code %d",
                             hw_id, ret);
                goto init_err;
        }

        priv->num_rx_tc = attr.num_rx_tcs;

        /* Set nb_rx_queues to the number of queues in the first TC, as only
         * one TC is currently supported on the Rx side. Once multiple TCs
         * are used for Rx processing, this will be changed or removed.
         */
        priv->nb_rx_queues = attr.num_queues;

        /* Use the number of Tx TCs as the number of Tx queues */
        priv->nb_tx_queues = attr.num_tx_tcs;

        DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
                        priv->num_rx_tc, priv->nb_rx_queues,
                        priv->nb_tx_queues);

        priv->hw = dpni_dev;
        priv->hw_id = hw_id;
        priv->options = attr.options;
        priv->max_mac_filters = attr.mac_filter_entries;
        priv->max_vlan_filters = attr.vlan_filter_entries;
        priv->flags = 0;

        /* Allocate memory for hardware structure for queues */
        ret = dpaa2_alloc_rx_tx_queues(eth_dev);
        if (ret) {
                DPAA2_PMD_ERR("Queue allocation failed");
                goto init_err;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("dpni",
                ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                DPAA2_PMD_ERR(
                   "Failed to allocate %d bytes needed to store MAC addresses",
                   ETHER_ADDR_LEN * attr.mac_filter_entries);
                ret = -ENOMEM;
                goto init_err;
        }

        ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
                                        priv->token,
                        (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
        if (ret) {
                DPAA2_PMD_ERR("DPNI get mac address failed: err code = %d",
                              ret);
                goto init_err;
        }

        /* Configure the Tx buffer layout */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX, &layout);
        if (ret) {
                DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
                goto init_err;
        }

        /* Configure the Tx-confirmation and error buffer layout */
        memset(&layout, 0, sizeof(struct dpni_buffer_layout));
        layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        layout.pass_frame_status = 1;
        ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
                                     DPNI_QUEUE_TX_CONFIRM, &layout);
        if (ret) {
                DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
                              ret);
                goto init_err;
        }

        eth_dev->dev_ops = &dpaa2_ethdev_ops;

        eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;

        DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name);
        return 0;
init_err:
        dpaa2_dev_uninit(eth_dev);
        return ret;
}

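/* Tear down everything dpaa2_dev_init() set up: close the port, free the
 * queue storage and MAC address table, close the DPNI at the MC and
 * release the dpni handle.
 */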
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        int i, ret;
        struct dpaa2_queue *dpaa2_q;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (!dpni) {
                DPAA2_PMD_WARN("Already closed or not started");
                return -1;
        }

        dpaa2_dev_close(eth_dev);

        if (priv->rx_vq[0]) {
                /* Clean up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
                        if (dpaa2_q->q_storage)
                                rte_free(dpaa2_q->q_storage);
                }
                /* Free the memory for all the queues */
                rte_free(priv->rx_vq[0]);
                priv->rx_vq[0] = NULL;
        }

        /* Free memory for storing MAC addresses */
        if (eth_dev->data->mac_addrs) {
                rte_free(eth_dev->data->mac_addrs);
                eth_dev->data->mac_addrs = NULL;
        }

        /* Close the device at the underlying layer */
        ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR(
                             "Failure closing dpni device with err code %d",
                             ret);
        }

        /* Free the allocated memory for the dpni handle */
        priv->hw = NULL;
        rte_free(dpni);

        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
        return 0;
}

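/* Bus probe callback: allocate the ethdev (or attach to it in a secondary
 * process), set up its private data in the primary process and invoke the
 * PMD init function.
 */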
static int
rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
                struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;
        int diag;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
                if (!eth_dev)
                        return -ENODEV;
                eth_dev->data->dev_private = rte_zmalloc(
                                                "ethdev private structure",
                                                sizeof(struct dpaa2_dev_priv),
                                                RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL) {
                        DPAA2_PMD_CRIT(
                                "Unable to allocate memory for private data");
                        rte_eth_dev_release_port(eth_dev);
                        return -ENOMEM;
                }
        } else {
                eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
                if (!eth_dev)
                        return -ENODEV;
        }

        eth_dev->device = &dpaa2_dev->device;
        eth_dev->device->driver = &dpaa2_drv->driver;

        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
                eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;

        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
        if (diag == 0)
                return 0;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

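/* Bus remove callback: uninitialize the device and release the ethdev
 * port and its private data.
 */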
static int
rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_eth_dev *eth_dev;

        eth_dev = dpaa2_dev->eth_dev;
        dpaa2_dev_uninit(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_ETH,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);

RTE_INIT(dpaa2_pmd_init_log);
static void
dpaa2_pmd_init_log(void)
{
        dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
        if (dpaa2_logtype_pmd >= 0)
                rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
}