net/dpaa2: support low level loopback tester
drivers/net/dpaa2/dpaa2_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_fslmc.h>

#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <mc/fsl_dpmng.h>
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>

#define DRIVER_LOOPBACK_MODE "drv_loopback"

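/*
 * Usage sketch (illustrative, not part of the driver): the loopback data
 * path is selected per device through a devargs key matching the define
 * above; assuming the usual fslmc object naming (e.g. dpni.1), an EAL
 * command line would look roughly like the following (the exact option
 * syntax may differ):
 *
 *   testpmd -c 0x3 -n 1 -w dpni.1,drv_loopback=1 -- -i
 */
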
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_FILTER |
		DEV_RX_OFFLOAD_JUMBO_FRAME;

/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
		DEV_RX_OFFLOAD_SCATTER;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MT_LOCKFREE |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;

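/*
 * The "nodis" masks above are offloads the hardware keeps enabled
 * unconditionally; dpaa2_eth_dev_configure() only warns when an
 * application omits them from its requested set. A minimal sketch of a
 * configuration request (illustrative; port_id and queue counts are
 * hypothetical):
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
 *                          DEV_RX_OFFLOAD_SCATTER;    // nodis, always on
 *   conf.txmode.offloads = DEV_TX_OFFLOAD_MULTI_SEGS; // nodis, always on
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */
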
/* enable timestamp in mbuf */
enum pmd_dpaa2_ts dpaa2_enable_ts;

struct rte_dpaa2_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint8_t page_id; /* dpni statistics page id */
	uint8_t stats_id; /* stats id in the given page */
};

static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
	{"ingress_multicast_frames", 0, 2},
	{"ingress_multicast_bytes", 0, 3},
	{"ingress_broadcast_frames", 0, 4},
	{"ingress_broadcast_bytes", 0, 5},
	{"egress_multicast_frames", 1, 2},
	{"egress_multicast_bytes", 1, 3},
	{"egress_broadcast_frames", 1, 4},
	{"egress_broadcast_bytes", 1, 5},
	{"ingress_filtered_frames", 2, 0},
	{"ingress_discarded_frames", 2, 1},
	{"ingress_nobuffer_discards", 2, 2},
	{"egress_discarded_frames", 2, 3},
	{"egress_confirmed_frames", 2, 4},
};

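/*
 * Each entry maps an xstat name to a (page, counter) slot of the
 * dpni_get_statistics() output: "ingress_multicast_frames", for
 * example, is read from page 0, raw.counter[2]. dpaa2_dev_xstats_get()
 * below fetches pages 0-2 once and indexes them through this table.
 */
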
static struct rte_dpaa2_driver rte_dpaa2_pmd;
static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

int dpaa2_logtype_pmd;

__rte_experimental void
rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
{
	dpaa2_enable_ts = enable;
}

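/*
 * Usage sketch (illustrative): an application that wants Rx timestamps
 * recorded in the mbuf calls this experimental API before starting the
 * port, assuming the PMD_DPAA2_ENABLE_TS enumerator from the
 * rte_pmd_dpaa2 header:
 *
 *   rte_pmd_dpaa2_set_timestamp(PMD_DPAA2_ENABLE_TS);
 */
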
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	if (on)
		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
				       priv->token, vlan_id);
	else
		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
					  priv->token, vlan_id);

	if (ret < 0)
		DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid = %d",
			      ret, vlan_id, priv->hw_id);

	return ret;
}

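/*
 * This handler backs the generic ethdev VLAN filter API; a minimal
 * usage sketch (illustrative, hypothetical port number):
 *
 *   // add VLAN 100 to the filter table of port 0
 *   rte_eth_dev_vlan_filter(0, 100, 1);
 */
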
static int
dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN Filter not available */
		if (!priv->max_vlan_filters) {
			DPAA2_PMD_INFO("VLAN filter not available");
			goto next_mask;
		}

		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_FILTER)
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, true);
		else
			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
						      priv->token, false);
		if (ret < 0)
			DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
	}
next_mask:
	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_VLAN_EXTEND)
			DPAA2_PMD_INFO("VLAN extend offload not supported");
	}

	return 0;
}

static int
dpaa2_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type __rte_unused,
		      uint16_t tpid)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	int ret = -ENOTSUP;

	PMD_INIT_FUNC_TRACE();

	/* nothing to be done for standard vlan tpids */
	if (tpid == 0x8100 || tpid == 0x88A8)
		return 0;

	ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid);
	if (ret < 0)
		DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret);
	/* if TPIDs are already configured, remove them first */
	if (ret == -EBUSY) {
		struct dpni_custom_tpid_cfg tpid_list = {0};

		ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, &tpid_list);
		if (ret < 0)
			goto fail;
		ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW,
				   priv->token, tpid_list.tpid1);
		if (ret < 0)
			goto fail;
		ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW,
					   priv->token, tpid);
	}
fail:
	return ret;
}

static int
dpaa2_fw_version_get(struct rte_eth_dev *dev,
		     char *fw_version,
		     size_t fw_size)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct mc_soc_version mc_plat_info = {0};
	struct mc_version mc_ver_info = {0};

	PMD_INIT_FUNC_TRACE();

	if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
		DPAA2_PMD_WARN("\tmc_get_soc_version failed");

	if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
		DPAA2_PMD_WARN("\tmc_get_version failed");

	ret = snprintf(fw_version, fw_size,
		       "%x-%d.%d.%d",
		       mc_plat_info.svr,
		       mc_ver_info.major,
		       mc_ver_info.minor,
		       mc_ver_info.revision);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (uint32_t)ret)
		return ret;
	else
		return 0;
}

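/*
 * Usage sketch (illustrative): callers go through the generic API; a
 * non-zero return is the buffer size that would have been needed:
 *
 *   char fw[32];
 *
 *   if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *       printf("fw: %s\n", fw);   // e.g. "87120010-10.10.4" (made-up value)
 */
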
static void
dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	dev_info->if_index = priv->hw_id;

	dev_info->max_mac_addrs = priv->max_mac_filters;
	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
	dev_info->rx_offload_capa = dev_rx_offloads_sup |
					dev_rx_offloads_nodis;
	dev_info->tx_offload_capa = dev_tx_offloads_sup |
					dev_tx_offloads_nodis;
	dev_info->speed_capa = ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_10G;

	dev_info->max_hash_mac_addrs = 0;
	dev_info->max_vfs = 0;
	dev_info->max_vmdq_pools = ETH_16_POOLS;
	dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}

static int
dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint16_t dist_idx;
	uint32_t vq_id;
	uint8_t num_rxqueue_per_tc;
	struct dpaa2_queue *mc_q, *mcq;
	uint32_t tot_queues;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc);
	tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
	mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
			  RTE_CACHE_LINE_SIZE);
	if (!mc_q) {
		DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
		return -1;
	}

	for (i = 0; i < priv->nb_rx_queues; i++) {
		mc_q->eth_data = dev->data;
		priv->rx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_q->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!dpaa2_q->q_storage)
			goto fail;

		memset(dpaa2_q->q_storage, 0,
		       sizeof(struct queue_storage_info_t));
		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
			goto fail;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		mc_q->eth_data = dev->data;
		mc_q->flow_id = 0xffff;
		priv->tx_vq[i] = mc_q++;
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		dpaa2_q->cscn = rte_malloc(NULL,
					   sizeof(struct qbman_result), 16);
		if (!dpaa2_q->cscn)
			goto fail_tx;
	}

	vq_id = 0;
	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
		vq_id++;
	}

	return 0;
fail_tx:
	i -= 1;
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		rte_free(dpaa2_q->cscn);
		priv->tx_vq[i--] = NULL;
	}
	i = priv->nb_rx_queues;
fail:
	i -= 1;
	mc_q = priv->rx_vq[0];
	while (i >= 0) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_free_dq_storage(dpaa2_q->q_storage);
		rte_free(dpaa2_q->q_storage);
		priv->rx_vq[i--] = NULL;
	}
	rte_free(mc_q);
	return -1;
}

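/*
 * Worked example of the (tc_index, flow_id) assignment above: with
 * priv->num_rx_tc = 2 and priv->nb_rx_queues = 8, num_rxqueue_per_tc is
 * 4, so Rx queue 5 is assigned tc_index = 5 / 4 = 1 and
 * flow_id = 5 % 4 = 1.
 */
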
static void
dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Queue allocation base */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* cleanup tx queue cscn */
		for (i = 0; i < priv->nb_tx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
			rte_free(dpaa2_q->cscn);
		}
		/* free memory for all queues (RX+TX) */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}
}

static int
dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint64_t tx_offloads = eth_conf->txmode.offloads;
	int rx_l3_csum_offload = false;
	int rx_l4_csum_offload = false;
	int tx_l3_csum_offload = false;
	int tx_l4_csum_offload = false;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Rx offloads validation */
	if (dev_rx_offloads_nodis & ~rx_offloads) {
		DPAA2_PMD_WARN(
		"Rx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			rx_offloads, dev_rx_offloads_nodis);
	}

	/* Tx offloads validation */
	if (dev_tx_offloads_nodis & ~tx_offloads) {
		DPAA2_PMD_WARN(
		"Tx offloads non configurable - requested 0x%" PRIx64
		" ignored 0x%" PRIx64,
			tx_offloads, dev_tx_offloads_nodis);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
			ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
				priv->token, eth_conf->rxmode.max_rx_pkt_len);
			if (ret) {
				DPAA2_PMD_ERR(
					"Unable to set mtu. Check config");
				return ret;
			}
		} else {
			return -1;
		}
	}

	if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = dpaa2_setup_flow_dist(dev,
				eth_conf->rx_adv_conf.rss_conf.rss_hf);
		if (ret) {
			DPAA2_PMD_ERR("Unable to set flow distribution. "
				      "Check queue config");
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rx_l3_csum_offload = true;

	if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
		(rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
		rx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l3 csum: Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set RX l4 csum: Error = %d", ret);
		return ret;
	}

	if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		tx_l3_csum_offload = true;

	if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
		(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
		tx_l4_csum_offload = true;

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l3 csum: Error = %d", ret);
		return ret;
	}

	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
			       DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
	if (ret) {
		DPAA2_PMD_ERR("Error to set TX l4 csum: Error = %d", ret);
		return ret;
	}

	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
	 * to 0 for LS2 in the hardware thus disabling data/annotation
	 * stashing. For LX2 this is fixed in hardware and thus hash result and
	 * parse results can be received in FD using this option.
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
				       DPNI_FLCTYPE_HASH, true);
		if (ret) {
			DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
			return ret;
		}
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);

	/* update the current status */
	dpaa2_dev_link_update(dev, 0);

	return 0;
}

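/*
 * Configuration sketch (illustrative; port_id and queue counts are
 * hypothetical): enabling RSS so the flow-distribution branch above is
 * taken:
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */
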
/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t rx_queue_id,
			 uint16_t nb_rx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_rxconf *rx_conf __rte_unused,
			 struct rte_mempool *mb_pool)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_queue *dpaa2_q;
	struct dpni_queue cfg;
	uint8_t options = 0;
	uint8_t flow_id;
	uint32_t bpid;
	int ret;

	PMD_INIT_FUNC_TRACE();

	DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
			dev, rx_queue_id, mb_pool, rx_conf);

	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
		bpid = mempool_to_bpid(mb_pool);
		ret = dpaa2_attach_bp_list(priv,
					   rte_dpaa2_bpid_info[bpid].bp_list);
		if (ret)
			return ret;
	}
	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
	dpaa2_q->bp_array = rte_dpaa2_bpid_info;

	/* Get the flow id from the given VQ id */
	flow_id = rx_queue_id % priv->nb_rx_queues;
	memset(&cfg, 0, sizeof(struct dpni_queue));

	options = options | DPNI_QUEUE_OPT_USER_CTX;
	cfg.user_context = (size_t)(dpaa2_q);

	/* if LS2088 or rev2 device, enable the stashing */
	if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) {
		options |= DPNI_QUEUE_OPT_FLC;
		cfg.flc.stash_control = true;
		cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
		/* The low 6 bits of FLC control stashing, two bits per
		 * field, in the order DS AS CS (data, annotation and
		 * context stashing). Setting DS AS CS = 01 01 00 (0x14)
		 * enables 1 line of data and 1 line of annotation
		 * stashing. For LX2 the setting should be
		 * 01 00 00 (0x10): 1 line of data stashing only.
		 */
		if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A)
			cfg.flc.value |= 0x10;
		else
			cfg.flc.value |= 0x14;
	}
	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
			     dpaa2_q->tc_index, flow_id, options, &cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
		return -1;
	}

	if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
		struct dpni_taildrop taildrop;

		taildrop.enable = 1;
		/* enabling per rx queue congestion control */
		taildrop.threshold = CONG_THRESHOLD_RX_Q;
		taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
		taildrop.oal = CONG_RX_OAL;
		DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
				rx_queue_id);
		ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					dpaa2_q->tc_index, flow_id, &taildrop);
		if (ret) {
			DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
				      ret);
			return -1;
		}
	}

	dev->data->rx_queues[rx_queue_id] = dpaa2_q;
	return 0;
}

static int
dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t tx_queue_id,
			 uint16_t nb_tx_desc __rte_unused,
			 unsigned int socket_id __rte_unused,
			 const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
		priv->tx_vq[tx_queue_id];
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_queue tx_conf_cfg;
	struct dpni_queue tx_flow_cfg;
	uint8_t options = 0, flow_id;
	uint32_t tc_id;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Return if queue already configured */
	if (dpaa2_q->flow_id != 0xffff) {
		dev->data->tx_queues[tx_queue_id] = dpaa2_q;
		return 0;
	}

	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));

	tc_id = tx_queue_id;
	flow_id = 0;

	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
			     tc_id, flow_id, options, &tx_flow_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error in setting the tx flow: "
			      "tc_id=%d, flow=%d err=%d",
			      tc_id, flow_id, ret);
		return -1;
	}

	dpaa2_q->flow_id = flow_id;

	if (tx_queue_id == 0) {
		/* Set tx-conf and error configuration */
		ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
						    priv->token,
						    DPNI_CONF_DISABLE);
		if (ret) {
			DPAA2_PMD_ERR("Error in set tx conf mode settings: "
				      "err=%d", ret);
			return -1;
		}
	}
	dpaa2_q->tc_index = tc_id;

	if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
		struct dpni_congestion_notification_cfg cong_notif_cfg;

		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
		cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
		/* Notify that the queue is not congested when the data in
		 * the queue is below this threshold.
		 */
		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
		cong_notif_cfg.message_ctx = 0;
		cong_notif_cfg.message_iova =
				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
		cong_notif_cfg.notification_mode =
					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
					 DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
					 DPNI_CONG_OPT_COHERENT_WRITE;

		ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
						       priv->token,
						       DPNI_QUEUE_TX,
						       tc_id,
						       &cong_notif_cfg);
		if (ret) {
			DPAA2_PMD_ERR(
			   "Error in setting tx congestion notification: "
			   "err=%d", ret);
			return -ret;
		}
	}
	dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
	dev->data->tx_queues[tx_queue_id] = dpaa2_q;
	return 0;
}

static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static uint32_t
dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	int32_t ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct dpaa2_queue *dpaa2_q;
	struct qbman_swp *swp;
	struct qbman_fq_query_np_rslt state;
	uint32_t frame_cnt = 0;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return -EINVAL;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];

	if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
		frame_cnt = qbman_fq_state_frame_count(&state);
		DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
				rx_queue_id, frame_cnt);
	}
	return frame_cnt;
}

static const uint32_t *
dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* todo: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
		dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
		return ptypes;
	return NULL;
}

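/*
 * Usage sketch (illustrative): applications query this list through the
 * generic API; the PMD only reports it when one of its own Rx handlers
 * is active:
 *
 *   uint32_t types[16];
 *   int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *                                            types, RTE_DIM(types));
 */
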
/**
 * DPAA2 link interrupt handler
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
dpaa2_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int status = 0, clear = 0;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, &status);
	if (unlikely(ret)) {
		DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
		clear = 0xffffffff;
		goto out;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
		clear = DPNI_IRQ_EVENT_LINK_CHANGED;
		dpaa2_dev_link_update(dev, 0);
		/* calling all the apps registered for link status event */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}
out:
	ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
				    irq_index, clear);
	if (unlikely(ret))
		DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}

static int
dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
{
	int err = 0;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int irq_index = DPNI_IRQ_INDEX;
	unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED;

	PMD_INIT_FUNC_TRACE();

	err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
				irq_index, mask);
	if (err < 0) {
		DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
			      strerror(-err));
		return err;
	}

	err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
				  irq_index, enable);
	if (err < 0)
		DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
			      strerror(-err));

	return err;
}

static int
dpaa2_dev_start(struct rte_eth_dev *dev)
{
	struct rte_device *rdev = dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_eth_dev_data *data = dev->data;
	struct dpaa2_dev_priv *priv = data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct dpni_queue cfg;
	struct dpni_error_cfg err_cfg;
	uint16_t qdid;
	struct dpni_queue_id qid;
	struct dpaa2_queue *dpaa2_q;
	int ret, i;
	struct rte_intr_handle *intr_handle;

	dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
	intr_handle = &dpaa2_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
			      priv->hw_id, ret);
		return ret;
	}

	/* Power up the phy. Needed to make the link go UP */
	dpaa2_dev_set_link_up(dev);

	ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
			    DPNI_QUEUE_TX, &qdid);
	if (ret) {
		DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
		return ret;
	}
	priv->qdid = qdid;

	for (i = 0; i < data->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_RX, dpaa2_q->tc_index,
				     dpaa2_q->flow_id, &cfg, &qid);
		if (ret) {
			DPAA2_PMD_ERR("Error in getting flow information: "
				      "err=%d", ret);
			return ret;
		}
		dpaa2_q->fqid = qid.fqid;
	}

	/* checksum errors: send the frames to the normal path and flag
	 * them in the annotation
	 */
	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;

	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
	err_cfg.set_frame_annotation = true;

	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
				       priv->token, &err_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
			      ret);
		return ret;
	}

	/* if the interrupts were configured on this device */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(intr_handle,
					   dpaa2_interrupt_handler,
					   (void *)dev);

		/* enable vfio intr/eventfd mapping
		 * Interrupt index 0 is required, so we can not use
		 * rte_intr_enable.
		 */
		rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX);

		/* enable dpni_irqs */
		dpaa2_eth_setup_irqs(dev, 1);
	}

	/* Change the tx burst function if ordered queues are used */
	if (priv->en_ordered)
		dev->tx_pkt_burst = dpaa2_dev_tx_ordered;

	return 0;
}

/**
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 */
static void
dpaa2_dev_stop(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* reset interrupt callback */
	if (intr_handle && (intr_handle->fd) &&
	    (dev->data->dev_conf.intr_conf.lsc != 0)) {
		/* disable dpni irqs */
		dpaa2_eth_setup_irqs(dev, 0);

		/* disable vfio intr before callback unregister */
		rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX);

		/* Unregistering LSC interrupt handler */
		rte_intr_callback_unregister(intr_handle,
					     dpaa2_interrupt_handler,
					     (void *)dev);
	}

	dpaa2_dev_set_link_down(dev);

	ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
			      ret, priv->hw_id);
		return;
	}

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_close(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int ret;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	/* Clean the device first */
	ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
		return;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);
}

static void
dpaa2_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}

static void
dpaa2_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);

	if (dev->data->all_multicast == 0) {
		ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
						 priv->token, false);
		if (ret < 0)
			DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
				      ret);
	}
}

static void
dpaa2_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}

static void
dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	/* must remain on for all promiscuous */
	if (dev->data->promiscuous == 1)
		return;

	ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
	if (ret < 0)
		DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}

static int
dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
				+ VLAN_TAG_SIZE;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the Max Rx frame length as 'mtu' +
	 * Maximum Ethernet header length
	 */
	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
					frame_size);
	if (ret) {
		DPAA2_PMD_ERR("Setting the max frame length failed");
		return -1;
	}
	DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
	return 0;
}

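/*
 * Worked example: rte_eth_dev_set_mtu(port, 1500) gives frame_size =
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522, which
 * exceeds ETHER_MAX_LEN (1518), so the jumbo offload flag is set; any
 * MTU pushing frame_size past DPAA2_MAX_RX_PKT_LEN is rejected with
 * -EINVAL.
 */
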
static int
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr,
		       __rte_unused uint32_t index,
		       __rte_unused uint32_t pool)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -1;
	}

	ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
				priv->token, addr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Adding the MAC ADDR failed: err = %d", ret);
	return ret;
}

static void
dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
			  uint32_t index)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;

	PMD_INIT_FUNC_TRACE();

	macaddr = &data->mac_addrs[index];

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
				   priv->token, macaddr->addr_bytes);
	if (ret)
		DPAA2_PMD_ERR(
			"error: Removing the MAC ADDR failed: err = %d", ret);
}

static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct ether_addr *addr)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
					priv->token, addr->addr_bytes);

	if (ret)
		DPAA2_PMD_ERR(
			"error: Setting the MAC ADDR failed %d", ret);

	return ret;
}

static int
dpaa2_dev_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	uint8_t page0 = 0, page1 = 1, page2 = 2;
	union dpni_statistics value;
	int i;
	struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq;

	memset(&value, 0, sizeof(union dpni_statistics));

	PMD_INIT_FUNC_TRACE();

	if (!dpni) {
		DPAA2_PMD_ERR("dpni is NULL");
		return -EINVAL;
	}

	if (!stats) {
		DPAA2_PMD_ERR("stats is NULL");
		return -EINVAL;
	}

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page0, 0, &value);
	if (retcode)
		goto err;

	stats->ipackets = value.page_0.ingress_all_frames;
	stats->ibytes = value.page_0.ingress_all_bytes;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page1, 0, &value);
	if (retcode)
		goto err;

	stats->opackets = value.page_1.egress_all_frames;
	stats->obytes = value.page_1.egress_all_bytes;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      page2, 0, &value);
	if (retcode)
		goto err;

	/* Ingress drop frame count due to configured rules */
	stats->ierrors = value.page_2.ingress_filtered_frames;
	/* Ingress drop frame count due to error */
	stats->ierrors += value.page_2.ingress_discarded_frames;

	stats->oerrors = value.page_2.egress_discarded_frames;
	stats->imissed = value.page_2.ingress_nobuffer_discards;

	/* Fill in per queue stats */
	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
		(i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) {
		dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i];
		dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_rxq)
			stats->q_ipackets[i] = dpaa2_rxq->rx_pkts;
		if (dpaa2_txq)
			stats->q_opackets[i] = dpaa2_txq->tx_pkts;

		/* Byte counting is not implemented */
		stats->q_ibytes[i]   = 0;
		stats->q_obytes[i]   = 0;
	}

	return 0;

err:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return retcode;
}

static int
dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	union dpni_statistics value[3] = {};
	unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);

	if (n < num)
		return num;

	if (xstats == NULL)
		return 0;

	/* Get Counters from page_0 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      0, 0, &value[0]);
	if (retcode)
		goto err;

	/* Get Counters from page_1 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      1, 0, &value[1]);
	if (retcode)
		goto err;

	/* Get Counters from page_2 */
	retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
				      2, 0, &value[2]);
	if (retcode)
		goto err;

	for (i = 0; i < num; i++) {
		xstats[i].id = i;
		xstats[i].value = value[dpaa2_xstats_strings[i].page_id].
			raw.counter[dpaa2_xstats_strings[i].stats_id];
	}
	return i;
err:
	DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
	return retcode;
}

static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);

	if (limit < stat_cnt)
		return stat_cnt;

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 dpaa2_xstats_strings[i].name);

	return stat_cnt;
}

static int
dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		       uint64_t *values, unsigned int n)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	uint64_t values_copy[stat_cnt];

	if (!ids) {
		struct dpaa2_dev_priv *priv = dev->data->dev_private;
		struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
		int32_t retcode;
		union dpni_statistics value[3] = {};

		if (n < stat_cnt)
			return stat_cnt;

		if (!values)
			return 0;

		/* Get Counters from page_0 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      0, 0, &value[0]);
		if (retcode)
			return 0;

		/* Get Counters from page_1 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      1, 0, &value[1]);
		if (retcode)
			return 0;

		/* Get Counters from page_2 */
		retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
					      2, 0, &value[2]);
		if (retcode)
			return 0;

		for (i = 0; i < stat_cnt; i++) {
			values[i] = value[dpaa2_xstats_strings[i].page_id].
				raw.counter[dpaa2_xstats_strings[i].stats_id];
		}
		return stat_cnt;
	}

	dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt);

	for (i = 0; i < n; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

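/*
 * Usage sketch (illustrative): fetching two specific counters by id,
 * where ids index into dpaa2_xstats_strings (id 0 is
 * "ingress_multicast_frames", id 8 "ingress_filtered_frames"):
 *
 *   uint64_t ids[] = { 0, 8 };
 *   uint64_t vals[2];
 *
 *   rte_eth_xstats_get_by_id(port_id, ids, vals, 2);
 */
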
static int
dpaa2_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
	struct rte_eth_xstat_name xstats_names_copy[stat_cnt];

	if (!ids)
		return dpaa2_xstats_get_names(dev, xstats_names, limit);

	dpaa2_xstats_get_names(dev, xstats_names_copy, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= stat_cnt) {
			DPAA2_PMD_ERR("xstats id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static void
dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int32_t retcode;
	int i;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		DPAA2_PMD_ERR("dpni is NULL");
		return;
	}

	retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
	if (retcode)
		goto error;

	/* Reset the per queue stats in dpaa2_queue structure */
	for (i = 0; i < priv->nb_rx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
		if (dpaa2_q)
			dpaa2_q->rx_pkts = 0;
	}

	for (i = 0; i < priv->nb_tx_queues; i++) {
		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
		if (dpaa2_q)
			dpaa2_q->tx_pkts = 0;
	}

	return;

error:
	DPAA2_PMD_ERR("Operation not completed: Error Code = %d", retcode);
	return;
}

1454 /* return 0 means link status changed, -1 means not changed */
1455 static int
1456 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1457                         int wait_to_complete __rte_unused)
1458 {
1459         int ret;
1460         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1461         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1462         struct rte_eth_link link;
1463         struct dpni_link_state state = {0};
1464
1465         if (dpni == NULL) {
1466                 DPAA2_PMD_ERR("dpni is NULL");
1467                 return 0;
1468         }
1469
1470         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1471         if (ret < 0) {
1472                 DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
1473                 return -1;
1474         }
1475
1476         memset(&link, 0, sizeof(struct rte_eth_link));
1477         link.link_status = state.up;
1478         link.link_speed = state.rate;
1479
1480         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1481                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1482         else
1483                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1484
1485         ret = rte_eth_linkstatus_set(dev, &link);
1486         if (ret == -1)
1487                 DPAA2_PMD_DEBUG("No change in status");
1488         else
1489                 DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
1490                                link.link_status ? "Up" : "Down");
1491
1492         return ret;
1493 }
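/*
 * Illustrative usage sketch (compiled out): polling the link state that
 * dpaa2_dev_link_update() reports, via the public ethdev API. The port id
 * is hypothetical; assumes <rte_ethdev.h> and <stdio.h>.
 */
#if 0
static void
link_poll_example(uint16_t port_id)
{
        struct rte_eth_link link;

        /* The _nowait variant returns the current state without blocking */
        rte_eth_link_get_nowait(port_id, &link);
        printf("Port %u: %s, %u Mbps, %s-duplex\n", port_id,
               link.link_status ? "up" : "down", link.link_speed,
               link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
}
#endif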
1494
1495 /**
1496  * Toggle the DPNI to enable, if not already enabled.
1497  * This is not strictly PHY up/down; it is more of a logical toggle.
1498  */
1499 static int
1500 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1501 {
1502         int ret = -EINVAL;
1503         struct dpaa2_dev_priv *priv;
1504         struct fsl_mc_io *dpni;
1505         int en = 0;
1506         struct dpni_link_state state = {0};
1507
1508         priv = dev->data->dev_private;
1509         dpni = (struct fsl_mc_io *)priv->hw;
1510
1511         if (dpni == NULL) {
1512                 DPAA2_PMD_ERR("dpni is NULL");
1513                 return ret;
1514         }
1515
1516         /* Check if DPNI is currently enabled */
1517         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1518         if (ret) {
1519                 /* Unable to obtain dpni status; Not continuing */
1520                 DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1521                 return -EINVAL;
1522         }
1523
1524         /* Enable link if not already enabled */
1525         if (!en) {
1526                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1527                 if (ret) {
1528                         DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
1529                         return -EINVAL;
1530                 }
1531         }
1532         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1533         if (ret < 0) {
1534                 DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret);
1535                 return -1;
1536         }
1537
1538         /* Change the tx burst function to start enqueues */
1539         dev->tx_pkt_burst = dpaa2_dev_tx;
1540         dev->data->dev_link.link_status = state.up;
1541
1542         if (state.up)
1543                 DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
1544         else
1545                 DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
1546         return ret;
1547 }
1548
1549 /**
1550  * Toggle the DPNI to disable, if not already disabled.
1551  * This is not strictly PHY up/down; it is more of a logical toggle.
1552  */
1553 static int
1554 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1555 {
1556         int ret = -EINVAL;
1557         struct dpaa2_dev_priv *priv;
1558         struct fsl_mc_io *dpni;
1559         int dpni_enabled = 0;
1560         int retries = 10;
1561
1562         PMD_INIT_FUNC_TRACE();
1563
1564         priv = dev->data->dev_private;
1565         dpni = (struct fsl_mc_io *)priv->hw;
1566
1567         if (dpni == NULL) {
1568                 DPAA2_PMD_ERR("Device has not yet been configured");
1569                 return ret;
1570         }
1571
1572         /* Change the tx burst function to avoid any more enqueues */
1573         dev->tx_pkt_burst = dummy_dev_tx;
1574
1575         /* Loop while dpni_disable() attempts to drain the egress FQs
1576          * and confirm them back to us.
1577          */
1578         do {
1579                 ret = dpni_disable(dpni, 0, priv->token);
1580                 if (ret) {
1581                         DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
1582                         return ret;
1583                 }
1584                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1585                 if (ret) {
1586                         DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
1587                         return ret;
1588                 }
1589                 if (dpni_enabled)
1590                         /* Allow the MC some slack */
1591                         rte_delay_us(100 * 1000);
1592         } while (dpni_enabled && --retries);
1593
1594         if (!retries) {
1595                 DPAA2_PMD_WARN("Retry count exceeded while disabling dpni");
1596                 /* TODO: we may have to manually clean up queues.
1597                  */
1598         } else {
1599                 DPAA2_PMD_INFO("Port %d Link DOWN successful",
1600                                dev->data->port_id);
1601         }
1602
1603         dev->data->dev_link.link_status = 0;
1604
1605         return ret;
1606 }
1607
1608 static int
1609 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1610 {
1611         int ret = -EINVAL;
1612         struct dpaa2_dev_priv *priv;
1613         struct fsl_mc_io *dpni;
1614         struct dpni_link_state state = {0};
1615
1616         PMD_INIT_FUNC_TRACE();
1617
1618         priv = dev->data->dev_private;
1619         dpni = (struct fsl_mc_io *)priv->hw;
1620
1621         if (dpni == NULL || fc_conf == NULL) {
1622                 DPAA2_PMD_ERR("device not configured");
1623                 return ret;
1624         }
1625
1626         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1627         if (ret) {
1628                 DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
1629                 return ret;
1630         }
1631
1632         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1633         if (state.options & DPNI_LINK_OPT_PAUSE) {
1634                 /* DPNI_LINK_OPT_PAUSE set
1635                  *  if ASYM_PAUSE not set,
1636                  *      RX Side flow control (handle received Pause frame)
1637                  *      TX side flow control (send Pause frame)
1638                  *  if ASYM_PAUSE set,
1639                  *      RX Side flow control (handle received Pause frame)
1640                  *      No TX side flow control (send Pause frame disabled)
1641                  */
1642                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1643                         fc_conf->mode = RTE_FC_FULL;
1644                 else
1645                         fc_conf->mode = RTE_FC_RX_PAUSE;
1646         } else {
1647                 /* DPNI_LINK_OPT_PAUSE not set
1648                  *  if ASYM_PAUSE set,
1649                  *      TX side flow control (send Pause frame)
1650                  *      No RX side flow control (No action on pause frame rx)
1651                  *  if ASYM_PAUSE not set,
1652                  *      Flow control disabled
1653                  */
1654                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1655                         fc_conf->mode = RTE_FC_TX_PAUSE;
1656                 else
1657                         fc_conf->mode = RTE_FC_NONE;
1658         }
1659
1660         return ret;
1661 }
1662
1663 static int
1664 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1665 {
1666         int ret = -EINVAL;
1667         struct dpaa2_dev_priv *priv;
1668         struct fsl_mc_io *dpni;
1669         struct dpni_link_state state = {0};
1670         struct dpni_link_cfg cfg = {0};
1671
1672         PMD_INIT_FUNC_TRACE();
1673
1674         priv = dev->data->dev_private;
1675         dpni = (struct fsl_mc_io *)priv->hw;
1676
1677         if (dpni == NULL) {
1678                 DPAA2_PMD_ERR("dpni is NULL");
1679                 return ret;
1680         }
1681
1682         /* It is necessary to obtain the current link state before setting
1683          * fc_conf, as the MC returns an error if the rate, autoneg or
1684          * duplex values differ from the current ones.
1685          */
1686         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1687         if (ret) {
1688                 DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
1689                 return -1;
1690         }
1691
1692         /* Disable link before setting configuration */
1693         dpaa2_dev_set_link_down(dev);
1694
1695         /* Based on fc_conf, update cfg */
1696         cfg.rate = state.rate;
1697         cfg.options = state.options;
1698
1699         /* update cfg with fc_conf */
1700         switch (fc_conf->mode) {
1701         case RTE_FC_FULL:
1702                 /* Full flow control;
1703                  * OPT_PAUSE set, ASYM_PAUSE not set
1704                  */
1705                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1706                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1707                 break;
1708         case RTE_FC_TX_PAUSE:
1709                 /* Enable TX flow control only:
1710                  * OPT_PAUSE not set;
1711                  * ASYM_PAUSE set;
1712                  */
1713                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1714                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1715                 break;
1716         case RTE_FC_RX_PAUSE:
1717                 /* Enable RX flow control only:
1718                  * OPT_PAUSE set
1719                  * ASYM_PAUSE set
1720                  */
1721                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1722                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1723                 break;
1724         case RTE_FC_NONE:
1725                 /* Disable Flow control
1726                  * OPT_PAUSE not set
1727                  * ASYM_PAUSE not set
1728                  */
1729                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1730                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1731                 break;
1732         default:
1733                 DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
1734                               fc_conf->mode);
1735                 return -1;
1736         }
1737
1738         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1739         if (ret)
1740                 DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
1741                               ret);
1742
1743         /* Enable link */
1744         dpaa2_dev_set_link_up(dev);
1745
1746         return ret;
1747 }
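/*
 * Illustrative usage sketch (compiled out): requesting full flow control
 * through the ethdev API. The get/set pair above maps each RTE_FC_* mode
 * onto the DPNI PAUSE/ASYM_PAUSE option bits as documented in
 * dpaa2_flow_ctrl_get(). The port id is hypothetical.
 */
#if 0
static int
enable_full_flow_control(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        /* Fetch the current settings first, then change only the mode */
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret)
                return ret;
        fc_conf.mode = RTE_FC_FULL; /* PAUSE set, ASYM_PAUSE cleared */
        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif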
1748
1749 static int
1750 dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
1751                           struct rte_eth_rss_conf *rss_conf)
1752 {
1753         struct rte_eth_dev_data *data = dev->data;
1754         struct rte_eth_conf *eth_conf = &data->dev_conf;
1755         int ret;
1756
1757         PMD_INIT_FUNC_TRACE();
1758
1759         if (rss_conf->rss_hf) {
1760                 ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
1761                 if (ret) {
1762                         DPAA2_PMD_ERR("Unable to set flow dist");
1763                         return ret;
1764                 }
1765         } else {
1766                 ret = dpaa2_remove_flow_dist(dev, 0);
1767                 if (ret) {
1768                         DPAA2_PMD_ERR("Unable to remove flow dist");
1769                         return ret;
1770                 }
1771         }
1772         eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
1773         return 0;
1774 }
1775
1776 static int
1777 dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1778                             struct rte_eth_rss_conf *rss_conf)
1779 {
1780         struct rte_eth_dev_data *data = dev->data;
1781         struct rte_eth_conf *eth_conf = &data->dev_conf;
1782
1783         /* dpaa2 does not support rss_key, so the reported key length is 0 */
1784         rss_conf->rss_key_len = 0;
1785         rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
1786         return 0;
1787 }
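/*
 * Illustrative usage sketch (compiled out): enabling Rx distribution over
 * IP/UDP fields via the RSS callbacks above. dpaa2 ignores rss_key, so only
 * rss_hf matters; a zero rss_hf removes the distribution. The port id is
 * hypothetical.
 */
#if 0
static int
enable_ip_udp_rss(uint16_t port_id)
{
        struct rte_eth_rss_conf rss_conf = {
                .rss_key = NULL,        /* not supported by dpaa2 */
                .rss_key_len = 0,
                .rss_hf = ETH_RSS_IP | ETH_RSS_UDP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif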
1788
1789 int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
1790                 int eth_rx_queue_id,
1791                 uint16_t dpcon_id,
1792                 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
1793 {
1794         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1795         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1796         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1797         uint8_t flow_id = dpaa2_ethq->flow_id;
1798         struct dpni_queue cfg;
1799         uint8_t options;
1800         int ret;
1801
1802         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
1803                 dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
1804         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
1805                 dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
1806         else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
1807                 dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
1808         else
1809                 return -EINVAL;
1810
1811         memset(&cfg, 0, sizeof(struct dpni_queue));
1812         options = DPNI_QUEUE_OPT_DEST;
1813         cfg.destination.type = DPNI_DEST_DPCON;
1814         cfg.destination.id = dpcon_id;
1815         cfg.destination.priority = queue_conf->ev.priority;
1816
1817         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
1818                 options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
1819                 cfg.destination.hold_active = 1;
1820         }
1821
1822         if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
1823                         !eth_priv->en_ordered) {
1824                 struct opr_cfg ocfg;
1825
1826                 /* Restoration window size = 256 frames */
1827                 ocfg.oprrws = 3;
1828                 /* Restoration window size = 512 frames for LX2 */
1829                 if (dpaa2_svr_family == SVR_LX2160A)
1830                         ocfg.oprrws = 4;
1831                 /* Auto advance NESN window enabled */
1832                 ocfg.oa = 1;
1833                 /* Late arrival window size disabled */
1834                 ocfg.olws = 0;
1835                 /* ORL resource exhaustion advance NESN disabled */
1836                 ocfg.oeane = 0;
1837                 /* Loose ordering enabled */
1838                 ocfg.oloe = 1;
1839                 eth_priv->en_loose_ordered = 1;
1840                 /* Strict ordering enabled if explicitly set */
1841                 if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
1842                         ocfg.oloe = 0;
1843                         eth_priv->en_loose_ordered = 0;
1844                 }
1845
1846                 ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
1847                                    dpaa2_ethq->tc_index, flow_id,
1848                                    OPR_OPT_CREATE, &ocfg);
1849                 if (ret) {
1850                         DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
1851                         return ret;
1852                 }
1853
1854                 eth_priv->en_ordered = 1;
1855         }
1856
1857         options |= DPNI_QUEUE_OPT_USER_CTX;
1858         cfg.user_context = (size_t)(dpaa2_ethq);
1859
1860         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1861                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1862         if (ret) {
1863                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1864                 return ret;
1865         }
1866
1867         memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event));
1868
1869         return 0;
1870 }
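/*
 * Illustrative usage sketch (compiled out): dpaa2_eth_eventq_attach() is
 * normally reached through the event eth Rx adapter. A hypothetical
 * application-side configuration could look as below; the adapter id, event
 * queue id and Rx queue id are illustrative. Assumes
 * <rte_event_eth_rx_adapter.h> and <rte_eventdev.h>.
 */
#if 0
static int
rx_adapter_queue_add_example(uint8_t adapter_id, uint16_t eth_port_id,
                             uint8_t ev_queue_id)
{
        struct rte_event_eth_rx_adapter_queue_conf conf = {
                .ev = {
                        .queue_id = ev_queue_id,
                        /* ATOMIC maps to DPCON hold-active above */
                        .sched_type = RTE_SCHED_TYPE_ATOMIC,
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                },
        };

        /* Attach Rx queue 0 of the ethdev to the event device */
        return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id,
                                                  0, &conf);
}
#endif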
1871
1872 int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
1873                 int eth_rx_queue_id)
1874 {
1875         struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
1876         struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw;
1877         struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
1878         uint8_t flow_id = dpaa2_ethq->flow_id;
1879         struct dpni_queue cfg;
1880         uint8_t options;
1881         int ret;
1882
1883         memset(&cfg, 0, sizeof(struct dpni_queue));
1884         options = DPNI_QUEUE_OPT_DEST;
1885         cfg.destination.type = DPNI_DEST_NONE;
1886
1887         ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
1888                              dpaa2_ethq->tc_index, flow_id, options, &cfg);
1889         if (ret)
1890                 DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
1891
1892         return ret;
1893 }
1894
1895 static struct eth_dev_ops dpaa2_ethdev_ops = {
1896         .dev_configure    = dpaa2_eth_dev_configure,
1897         .dev_start            = dpaa2_dev_start,
1898         .dev_stop             = dpaa2_dev_stop,
1899         .dev_close            = dpaa2_dev_close,
1900         .promiscuous_enable   = dpaa2_dev_promiscuous_enable,
1901         .promiscuous_disable  = dpaa2_dev_promiscuous_disable,
1902         .allmulticast_enable  = dpaa2_dev_allmulticast_enable,
1903         .allmulticast_disable = dpaa2_dev_allmulticast_disable,
1904         .dev_set_link_up      = dpaa2_dev_set_link_up,
1905         .dev_set_link_down    = dpaa2_dev_set_link_down,
1906         .link_update       = dpaa2_dev_link_update,
1907         .stats_get             = dpaa2_dev_stats_get,
1908         .xstats_get            = dpaa2_dev_xstats_get,
1909         .xstats_get_by_id     = dpaa2_xstats_get_by_id,
1910         .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id,
1911         .xstats_get_names      = dpaa2_xstats_get_names,
1912         .stats_reset       = dpaa2_dev_stats_reset,
1913         .xstats_reset         = dpaa2_dev_stats_reset,
1914         .fw_version_get    = dpaa2_fw_version_get,
1915         .dev_infos_get     = dpaa2_dev_info_get,
1916         .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
1917         .mtu_set           = dpaa2_dev_mtu_set,
1918         .vlan_filter_set      = dpaa2_vlan_filter_set,
1919         .vlan_offload_set     = dpaa2_vlan_offload_set,
1920         .vlan_tpid_set        = dpaa2_vlan_tpid_set,
1921         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
1922         .rx_queue_release  = dpaa2_dev_rx_queue_release,
1923         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
1924         .tx_queue_release  = dpaa2_dev_tx_queue_release,
1925         .rx_queue_count       = dpaa2_dev_rx_queue_count,
1926         .flow_ctrl_get        = dpaa2_flow_ctrl_get,
1927         .flow_ctrl_set        = dpaa2_flow_ctrl_set,
1928         .mac_addr_add         = dpaa2_dev_add_mac_addr,
1929         .mac_addr_remove      = dpaa2_dev_remove_mac_addr,
1930         .mac_addr_set         = dpaa2_dev_set_mac_addr,
1931         .rss_hash_update      = dpaa2_dev_rss_hash_update,
1932         .rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
1933 };
1934
1935 /* Populate the MAC address from the one physically available (set by
1936  * u-boot/firmware) and/or the one set by higher layers such as MC (restool).
1937  * On success, writes the chosen (prime) MAC to mac_entry and returns 0.
1938  */
1939 static int
1940 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
1941                   struct ether_addr *mac_entry)
1942 {
1943         int ret;
1944         struct ether_addr phy_mac, prime_mac;
1945
1946         memset(&phy_mac, 0, sizeof(struct ether_addr));
1947         memset(&prime_mac, 0, sizeof(struct ether_addr));
1948
1949         /* Get the physical device MAC address */
1950         ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
1951                                      phy_mac.addr_bytes);
1952         if (ret) {
1953                 DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret);
1954                 goto cleanup;
1955         }
1956
1957         ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
1958                                         prime_mac.addr_bytes);
1959         if (ret) {
1960                 DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret);
1961                 goto cleanup;
1962         }
1963
1964         /* Now that both MACs have been obtained:
1965          *  if phy is non-empty and phy != prime, overwrite prime with phy
1966          *     and use phy;
1967          *  if phy is empty, use prime;
1968          *  if both are empty, create a random MAC and set it as prime.
1969          */
1970         if (!is_zero_ether_addr(&phy_mac)) {
1971                 /* If the addresses are not same, overwrite prime */
1972                 if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
1973                         ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
1974                                                         priv->token,
1975                                                         phy_mac.addr_bytes);
1976                         if (ret) {
1977                                 DPAA2_PMD_ERR("Unable to set MAC Address: %d",
1978                                               ret);
1979                                 goto cleanup;
1980                         }
1981                         memcpy(&prime_mac, &phy_mac, sizeof(struct ether_addr));
1982                 }
1983         } else if (is_zero_ether_addr(&prime_mac)) {
1984                 /* If both phy and prime MACs are zero, create a random MAC */
1985                 eth_random_addr(prime_mac.addr_bytes);
1986                 ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
1987                                                 priv->token,
1988                                                 prime_mac.addr_bytes);
1989                 if (ret) {
1990                         DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret);
1991                         goto cleanup;
1992                 }
1993         }
1994
1995         /* prime_mac holds the final MAC address */
1996         memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
1997         return 0;
1998
1999 cleanup:
2000         return -1;
2001 }
2002
2003 static int
2004 check_devargs_handler(__rte_unused const char *key, const char *value,
2005                       __rte_unused void *opaque)
2006 {
2007         if (strcmp(value, "1"))
2008                 return -1;
2009
2010         return 0;
2011 }
2012
2013 static int
2014 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
2015 {
2016         struct rte_kvargs *kvlist;
2017
2018         if (!devargs)
2019                 return 0;
2020
2021         kvlist = rte_kvargs_parse(devargs->args, NULL);
2022         if (!kvlist)
2023                 return 0;
2024
2025         if (!rte_kvargs_count(kvlist, key)) {
2026                 rte_kvargs_free(kvlist);
2027                 return 0;
2028         }
2029
2030         if (rte_kvargs_process(kvlist, key,
2031                                check_devargs_handler, NULL) < 0) {
2032                 rte_kvargs_free(kvlist);
2033                 return 0;
2034         }
2035         rte_kvargs_free(kvlist);
2036
2037         return 1;
2038 }
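/*
 * Illustrative sketch (compiled out) of the same rte_kvargs pattern used by
 * dpaa2_get_devargs(): a key is treated as set only when supplied as
 * "<key>=1". In dpaa2_dev_init() below this is what selects the loopback Rx
 * burst function when the DRIVER_LOOPBACK_MODE device argument is given.
 */
#if 0
static int
loopback_devarg_example(void)
{
        /* Parse a literal devargs string instead of dev->devargs */
        struct rte_kvargs *kvlist =
                rte_kvargs_parse(DRIVER_LOOPBACK_MODE "=1", NULL);
        int hit = 0;

        if (!kvlist)
                return 0;
        if (rte_kvargs_count(kvlist, DRIVER_LOOPBACK_MODE) &&
            !rte_kvargs_process(kvlist, DRIVER_LOOPBACK_MODE,
                                check_devargs_handler, NULL))
                hit = 1;
        rte_kvargs_free(kvlist);
        return hit;
}
#endif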
2039
2040 static int
2041 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
2042 {
2043         struct rte_device *dev = eth_dev->device;
2044         struct rte_dpaa2_device *dpaa2_dev;
2045         struct fsl_mc_io *dpni_dev;
2046         struct dpni_attr attr;
2047         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2048         struct dpni_buffer_layout layout;
2049         int ret, hw_id;
2050
2051         PMD_INIT_FUNC_TRACE();
2052
2053         /* For secondary processes, the primary has done all the work */
2054         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2055                 /* For a secondary process, only the burst and ops APIs
2056                  * need to be plugged in.
2057                  */
2058                 eth_dev->dev_ops = &dpaa2_ethdev_ops;
2059                 if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
2060                         eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2061                 else
2062                         eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2063                 eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2064                 return 0;
2065         }
2066
2067         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2068
2069         hw_id = dpaa2_dev->object_id;
2070
2071         dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
2072         if (!dpni_dev) {
2073                 DPAA2_PMD_ERR("Memory allocation failed for dpni device");
2074                 return -1;
2075         }
2076
2077         dpni_dev->regs = rte_mcp_ptr_list[0];
2078         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
2079         if (ret) {
2080                 DPAA2_PMD_ERR(
2081                              "Failure in opening dpni@%d with err code %d",
2082                              hw_id, ret);
2083                 rte_free(dpni_dev);
2084                 return -1;
2085         }
2086
2087         /* Clean the device first */
2088         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
2089         if (ret) {
2090                 DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
2091                               hw_id, ret);
2092                 goto init_err;
2093         }
2094
2095         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
2096         if (ret) {
2097                 DPAA2_PMD_ERR(
2098                              "Failure in get dpni@%d attribute, err code %d",
2099                              hw_id, ret);
2100                 goto init_err;
2101         }
2102
2103         priv->num_rx_tc = attr.num_rx_tcs;
2104
2105         /* Set "nb_rx_queues" to the number of queues in the first TC, as
2106          * only one TC is supported on the Rx side. Once multiple TCs are
2107          * in use for Rx processing, this will be changed or removed.
2108          */
2109         priv->nb_rx_queues = attr.num_queues;
2110
2111         /* Use the number of Tx TCs as the number of Tx queues */
2112         priv->nb_tx_queues = attr.num_tx_tcs;
2113
2114         DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
2115                         priv->num_rx_tc, priv->nb_rx_queues,
2116                         priv->nb_tx_queues);
2117
2118         priv->hw = dpni_dev;
2119         priv->hw_id = hw_id;
2120         priv->options = attr.options;
2121         priv->max_mac_filters = attr.mac_filter_entries;
2122         priv->max_vlan_filters = attr.vlan_filter_entries;
2123         priv->flags = 0;
2124
2125         /* Allocate memory for hardware structure for queues */
2126         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
2127         if (ret) {
2128                 DPAA2_PMD_ERR("Queue allocation Failed");
2129                 goto init_err;
2130         }
2131
2132         /* Allocate memory for storing MAC addresses.
2133          * A table of mac_filter_entries size is allocated so that the RTE
2134          * ether lib can add entries via rte_eth_dev_mac_addr_add.
2135          */
2136         eth_dev->data->mac_addrs = rte_zmalloc("dpni",
2137                 ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
2138         if (eth_dev->data->mac_addrs == NULL) {
2139                 DPAA2_PMD_ERR(
2140                    "Failed to allocate %d bytes needed to store MAC addresses",
2141                    ETHER_ADDR_LEN * attr.mac_filter_entries);
2142                 ret = -ENOMEM;
2143                 goto init_err;
2144         }
2145
2146         ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]);
2147         if (ret) {
2148                 DPAA2_PMD_ERR("Unable to fetch MAC Address for device");
2149                 rte_free(eth_dev->data->mac_addrs);
2150                 eth_dev->data->mac_addrs = NULL;
2151                 goto init_err;
2152         }
2153
2154         /* ... tx buffer layout ... */
2155         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2156         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2157         layout.pass_frame_status = 1;
2158         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2159                                      DPNI_QUEUE_TX, &layout);
2160         if (ret) {
2161                 DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
2162                 goto init_err;
2163         }
2164
2165         /* ... tx-conf and error buffer layout ... */
2166         memset(&layout, 0, sizeof(struct dpni_buffer_layout));
2167         layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
2168         layout.pass_frame_status = 1;
2169         ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
2170                                      DPNI_QUEUE_TX_CONFIRM, &layout);
2171         if (ret) {
2172                 DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
2173                              ret);
2174                 goto init_err;
2175         }
2176
2177         eth_dev->dev_ops = &dpaa2_ethdev_ops;
2178
2179         if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
2180                 eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
2181                 DPAA2_PMD_INFO("Loopback mode");
2182         } else {
2183                 eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
2184         }
2185         eth_dev->tx_pkt_burst = dpaa2_dev_tx;
2186
2187         RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
2188         return 0;
2189 init_err:
2190         dpaa2_dev_uninit(eth_dev);
2191         return ret;
2192 }
2193
2194 static int
2195 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
2196 {
2197         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
2198         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2199         int ret;
2200
2201         PMD_INIT_FUNC_TRACE();
2202
2203         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2204                 return 0;
2205
2206         if (!dpni) {
2207                 DPAA2_PMD_WARN("Already closed or not started");
2208                 return -1;
2209         }
2210
2211         dpaa2_dev_close(eth_dev);
2212
2213         dpaa2_free_rx_tx_queues(eth_dev);
2214
2215         /* Close the device at the underlying layer */
2216         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
2217         if (ret) {
2218                 DPAA2_PMD_ERR(
2219                              "Failure closing dpni device with err code %d",
2220                              ret);
2221         }
2222
2223         /* Free the allocated memory for Ethernet private data and dpni */
2224         priv->hw = NULL;
2225         rte_free(dpni);
2226
2227         eth_dev->dev_ops = NULL;
2228         eth_dev->rx_pkt_burst = NULL;
2229         eth_dev->tx_pkt_burst = NULL;
2230
2231         DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
2232         return 0;
2233 }
2234
2235 static int
2236 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
2237                 struct rte_dpaa2_device *dpaa2_dev)
2238 {
2239         struct rte_eth_dev *eth_dev;
2240         int diag;
2241
2242         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2243                 eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
2244                 if (!eth_dev)
2245                         return -ENODEV;
2246                 eth_dev->data->dev_private = rte_zmalloc(
2247                                                 "ethdev private structure",
2248                                                 sizeof(struct dpaa2_dev_priv),
2249                                                 RTE_CACHE_LINE_SIZE);
2250                 if (eth_dev->data->dev_private == NULL) {
2251                         DPAA2_PMD_CRIT(
2252                                 "Unable to allocate memory for private data");
2253                         rte_eth_dev_release_port(eth_dev);
2254                         return -ENOMEM;
2255                 }
2256         } else {
2257                 eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
2258                 if (!eth_dev)
2259                         return -ENODEV;
2260         }
2261
2262         eth_dev->device = &dpaa2_dev->device;
2263
2264         dpaa2_dev->eth_dev = eth_dev;
2265         eth_dev->data->rx_mbuf_alloc_failed = 0;
2266
2267         if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
2268                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2269
2270         /* Invoke PMD device initialization function */
2271         diag = dpaa2_dev_init(eth_dev);
2272         if (diag == 0) {
2273                 rte_eth_dev_probing_finish(eth_dev);
2274                 return 0;
2275         }
2276
2277         rte_eth_dev_release_port(eth_dev);
2278         return diag;
2279 }
2280
2281 static int
2282 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
2283 {
2284         struct rte_eth_dev *eth_dev;
2285
2286         eth_dev = dpaa2_dev->eth_dev;
2287         dpaa2_dev_uninit(eth_dev);
2288
2289         rte_eth_dev_release_port(eth_dev);
2290
2291         return 0;
2292 }
2293
2294 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
2295         .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA,
2296         .drv_type = DPAA2_ETH,
2297         .probe = rte_dpaa2_probe,
2298         .remove = rte_dpaa2_remove,
2299 };
2300
2301 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
2302 RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
2303                 DRIVER_LOOPBACK_MODE "=<int>");
2304 RTE_INIT(dpaa2_pmd_init_log)
2305 {
2306         dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
2307         if (dpaa2_logtype_pmd >= 0)
2308                 rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
2309 }
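/*
 * Note: the log type registered above defaults to NOTICE. Assuming an EAL
 * recent enough to accept the "<type>:<level>" form, more verbose driver
 * logs can be requested at run time with, e.g.:
 *   --log-level=pmd.net.dpaa2:debug
 */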