net/dpaa2: set device driver
[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright (c) 2016 NXP. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_ethdev.h>
46 #include <rte_fslmc.h>
47
48 #include <fslmc_logs.h>
49 #include <fslmc_vfio.h>
50 #include <dpaa2_hw_pvt.h>
51 #include <dpaa2_hw_mempool.h>
52 #include <dpaa2_hw_dpio.h>
53 #include <mc/fsl_dpmng.h>
54 #include "dpaa2_ethdev.h"
55
56 static struct rte_dpaa2_driver rte_dpaa2_pmd;
57 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
58 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
59 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
60 static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
61
62 /**
63  * Atomically reads the link status information from global
64  * structure rte_eth_dev.
65  *
66  * @param dev
67  *   - Pointer to the structure rte_eth_dev to read from.
68  *   - Pointer to the buffer to be saved with the link status.
69  *
70  * @return
71  *   - On success, zero.
72  *   - On failure, negative value.
73  */
74 static inline int
75 dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
76                                   struct rte_eth_link *link)
77 {
78         struct rte_eth_link *dst = link;
79         struct rte_eth_link *src = &dev->data->dev_link;
80
81         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
82                                 *(uint64_t *)src) == 0)
83                 return -1;
84
85         return 0;
86 }
87
88 /**
89  * Atomically writes the link status information into global
90  * structure rte_eth_dev.
91  *
92  * @param dev
93  *   - Pointer to the structure rte_eth_dev to read from.
94  *   - Pointer to the buffer to be saved with the link status.
95  *
96  * @return
97  *   - On success, zero.
98  *   - On failure, negative value.
99  */
100 static inline int
101 dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
102                                    struct rte_eth_link *link)
103 {
104         struct rte_eth_link *dst = &dev->data->dev_link;
105         struct rte_eth_link *src = link;
106
107         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
108                                 *(uint64_t *)src) == 0)
109                 return -1;
110
111         return 0;
112 }
113
114 static int
115 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
116 {
117         int ret;
118         struct dpaa2_dev_priv *priv = dev->data->dev_private;
119         struct fsl_mc_io *dpni = priv->hw;
120
121         PMD_INIT_FUNC_TRACE();
122
123         if (dpni == NULL) {
124                 RTE_LOG(ERR, PMD, "dpni is NULL");
125                 return -1;
126         }
127
128         if (on)
129                 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
130                                        priv->token, vlan_id);
131         else
132                 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
133                                           priv->token, vlan_id);
134
135         if (ret < 0)
136                 PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
137                             ret, vlan_id, priv->hw_id);
138
139         return ret;
140 }
141
142 static void
143 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
144 {
145         struct dpaa2_dev_priv *priv = dev->data->dev_private;
146         struct fsl_mc_io *dpni = priv->hw;
147         int ret;
148
149         PMD_INIT_FUNC_TRACE();
150
151         if (mask & ETH_VLAN_FILTER_MASK) {
152                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
153                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
154                                                       priv->token, true);
155                 else
156                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
157                                                       priv->token, false);
158                 if (ret < 0)
159                         RTE_LOG(ERR, PMD, "Unable to set vlan filter ret = %d",
160                                 ret);
161         }
162 }
163
164 static int
165 dpaa2_fw_version_get(struct rte_eth_dev *dev,
166                      char *fw_version,
167                      size_t fw_size)
168 {
169         int ret;
170         struct dpaa2_dev_priv *priv = dev->data->dev_private;
171         struct fsl_mc_io *dpni = priv->hw;
172         struct mc_soc_version mc_plat_info = {0};
173         struct mc_version mc_ver_info = {0};
174
175         PMD_INIT_FUNC_TRACE();
176
177         if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
178                 RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
179
180         if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
181                 RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
182
183         ret = snprintf(fw_version, fw_size,
184                        "%x-%d.%d.%d",
185                        mc_plat_info.svr,
186                        mc_ver_info.major,
187                        mc_ver_info.minor,
188                        mc_ver_info.revision);
189
190         ret += 1; /* add the size of '\0' */
191         if (fw_size < (uint32_t)ret)
192                 return ret;
193         else
194                 return 0;
195 }
196
197 static void
198 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
199 {
200         struct dpaa2_dev_priv *priv = dev->data->dev_private;
201
202         PMD_INIT_FUNC_TRACE();
203
204         dev_info->if_index = priv->hw_id;
205
206         dev_info->max_mac_addrs = priv->max_mac_filters;
207         dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
208         dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
209         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
210         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
211         dev_info->rx_offload_capa =
212                 DEV_RX_OFFLOAD_IPV4_CKSUM |
213                 DEV_RX_OFFLOAD_UDP_CKSUM |
214                 DEV_RX_OFFLOAD_TCP_CKSUM |
215                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
216         dev_info->tx_offload_capa =
217                 DEV_TX_OFFLOAD_IPV4_CKSUM |
218                 DEV_TX_OFFLOAD_UDP_CKSUM |
219                 DEV_TX_OFFLOAD_TCP_CKSUM |
220                 DEV_TX_OFFLOAD_SCTP_CKSUM |
221                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
222         dev_info->speed_capa = ETH_LINK_SPEED_1G |
223                         ETH_LINK_SPEED_2_5G |
224                         ETH_LINK_SPEED_10G;
225 }
226
227 static int
228 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
229 {
230         struct dpaa2_dev_priv *priv = dev->data->dev_private;
231         uint16_t dist_idx;
232         uint32_t vq_id;
233         struct dpaa2_queue *mc_q, *mcq;
234         uint32_t tot_queues;
235         int i;
236         struct dpaa2_queue *dpaa2_q;
237
238         PMD_INIT_FUNC_TRACE();
239
240         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
241         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
242                           RTE_CACHE_LINE_SIZE);
243         if (!mc_q) {
244                 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
245                 return -1;
246         }
247
248         for (i = 0; i < priv->nb_rx_queues; i++) {
249                 mc_q->dev = dev;
250                 priv->rx_vq[i] = mc_q++;
251                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
252                 dpaa2_q->q_storage = rte_malloc("dq_storage",
253                                         sizeof(struct queue_storage_info_t),
254                                         RTE_CACHE_LINE_SIZE);
255                 if (!dpaa2_q->q_storage)
256                         goto fail;
257
258                 memset(dpaa2_q->q_storage, 0,
259                        sizeof(struct queue_storage_info_t));
260                 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
261                         goto fail;
262         }
263
264         for (i = 0; i < priv->nb_tx_queues; i++) {
265                 mc_q->dev = dev;
266                 mc_q->flow_id = 0xffff;
267                 priv->tx_vq[i] = mc_q++;
268                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
269                 dpaa2_q->cscn = rte_malloc(NULL,
270                                            sizeof(struct qbman_result), 16);
271                 if (!dpaa2_q->cscn)
272                         goto fail_tx;
273         }
274
275         vq_id = 0;
276         for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
277              dist_idx++) {
278                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
279                 mcq->tc_index = DPAA2_DEF_TC;
280                 mcq->flow_id = dist_idx;
281                 vq_id++;
282         }
283
284         return 0;
285 fail_tx:
286         i -= 1;
287         while (i >= 0) {
288                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
289                 rte_free(dpaa2_q->cscn);
290                 priv->tx_vq[i--] = NULL;
291         }
292         i = priv->nb_rx_queues;
293 fail:
294         i -= 1;
295         mc_q = priv->rx_vq[0];
296         while (i >= 0) {
297                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
298                 dpaa2_free_dq_storage(dpaa2_q->q_storage);
299                 rte_free(dpaa2_q->q_storage);
300                 priv->rx_vq[i--] = NULL;
301         }
302         rte_free(mc_q);
303         return -1;
304 }
305
306 static int
307 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
308 {
309         struct rte_eth_dev_data *data = dev->data;
310         struct rte_eth_conf *eth_conf = &data->dev_conf;
311         int ret;
312
313         PMD_INIT_FUNC_TRACE();
314
315         if (eth_conf->rxmode.jumbo_frame == 1) {
316                 if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
317                         ret = dpaa2_dev_mtu_set(dev,
318                                         eth_conf->rxmode.max_rx_pkt_len);
319                         if (ret) {
320                                 PMD_INIT_LOG(ERR,
321                                              "unable to set mtu. check config\n");
322                                 return ret;
323                         }
324                 } else {
325                         return -1;
326                 }
327         }
328
329         /* Check for correct configuration */
330         if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
331             data->nb_rx_queues > 1) {
332                 PMD_INIT_LOG(ERR, "Distribution is not enabled, "
333                             "but Rx queues more than 1\n");
334                 return -1;
335         }
336
337         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
338                 /* Return in case number of Rx queues is 1 */
339                 if (data->nb_rx_queues == 1)
340                         return 0;
341                 ret = dpaa2_setup_flow_dist(dev,
342                                 eth_conf->rx_adv_conf.rss_conf.rss_hf);
343                 if (ret) {
344                         PMD_INIT_LOG(ERR, "unable to set flow distribution."
345                                      "please check queue config\n");
346                         return ret;
347                 }
348         }
349         return 0;
350 }
351
352 /* Function to setup RX flow information. It contains traffic class ID,
353  * flow ID, destination configuration etc.
354  */
355 static int
356 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
357                          uint16_t rx_queue_id,
358                          uint16_t nb_rx_desc __rte_unused,
359                          unsigned int socket_id __rte_unused,
360                          const struct rte_eth_rxconf *rx_conf __rte_unused,
361                          struct rte_mempool *mb_pool)
362 {
363         struct dpaa2_dev_priv *priv = dev->data->dev_private;
364         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
365         struct dpaa2_queue *dpaa2_q;
366         struct dpni_queue cfg;
367         uint8_t options = 0;
368         uint8_t flow_id;
369         uint32_t bpid;
370         int ret;
371
372         PMD_INIT_FUNC_TRACE();
373
374         PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
375                      dev, rx_queue_id, mb_pool, rx_conf);
376
377         if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
378                 bpid = mempool_to_bpid(mb_pool);
379                 ret = dpaa2_attach_bp_list(priv,
380                                            rte_dpaa2_bpid_info[bpid].bp_list);
381                 if (ret)
382                         return ret;
383         }
384         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
385         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
386
387         /*Get the tc id and flow id from given VQ id*/
388         flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
389         memset(&cfg, 0, sizeof(struct dpni_queue));
390
391         options = options | DPNI_QUEUE_OPT_USER_CTX;
392         cfg.user_context = (uint64_t)(dpaa2_q);
393
394         /*if ls2088 or rev2 device, enable the stashing */
395         if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
396                 options |= DPNI_QUEUE_OPT_FLC;
397                 cfg.flc.stash_control = true;
398                 cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
399                 /* 00 00 00 - last 6 bit represent annotation, context stashing,
400                  * data stashing setting 01 01 00 (0x14) to enable
401                  * 1 line data, 1 line annotation
402                  */
403                 cfg.flc.value |= 0x14;
404         }
405         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
406                              dpaa2_q->tc_index, flow_id, options, &cfg);
407         if (ret) {
408                 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
409                 return -1;
410         }
411
412         if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
413                 struct dpni_taildrop taildrop;
414
415                 taildrop.enable = 1;
416                 /*enabling per rx queue congestion control */
417                 taildrop.threshold = CONG_THRESHOLD_RX_Q;
418                 taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
419                 PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
420                              rx_queue_id);
421                 ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
422                                         DPNI_CP_QUEUE, DPNI_QUEUE_RX,
423                                         dpaa2_q->tc_index, flow_id, &taildrop);
424                 if (ret) {
425                         PMD_INIT_LOG(ERR, "Error in setting the rx flow"
426                                      " err : = %d\n", ret);
427                         return -1;
428                 }
429         }
430
431         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
432         return 0;
433 }
434
435 static int
436 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
437                          uint16_t tx_queue_id,
438                          uint16_t nb_tx_desc __rte_unused,
439                          unsigned int socket_id __rte_unused,
440                          const struct rte_eth_txconf *tx_conf __rte_unused)
441 {
442         struct dpaa2_dev_priv *priv = dev->data->dev_private;
443         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
444                 priv->tx_vq[tx_queue_id];
445         struct fsl_mc_io *dpni = priv->hw;
446         struct dpni_queue tx_conf_cfg;
447         struct dpni_queue tx_flow_cfg;
448         uint8_t options = 0, flow_id;
449         uint32_t tc_id;
450         int ret;
451
452         PMD_INIT_FUNC_TRACE();
453
454         /* Return if queue already configured */
455         if (dpaa2_q->flow_id != 0xffff)
456                 return 0;
457
458         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
459         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
460
461         if (priv->num_tc == 1) {
462                 tc_id = 0;
463                 flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
464         } else {
465                 tc_id = tx_queue_id;
466                 flow_id = 0;
467         }
468
469         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
470                              tc_id, flow_id, options, &tx_flow_cfg);
471         if (ret) {
472                 PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
473                              "tc_id=%d, flow =%d ErrorCode = %x\n",
474                              tc_id, flow_id, -ret);
475                         return -1;
476         }
477
478         dpaa2_q->flow_id = flow_id;
479
480         if (tx_queue_id == 0) {
481                 /*Set tx-conf and error configuration*/
482                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
483                                                     priv->token,
484                                                     DPNI_CONF_DISABLE);
485                 if (ret) {
486                         PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
487                                      " ErrorCode = %x", ret);
488                         return -1;
489                 }
490         }
491         dpaa2_q->tc_index = tc_id;
492
493         if (priv->flags & DPAA2_TX_CGR_SUPPORT) {
494                 struct dpni_congestion_notification_cfg cong_notif_cfg;
495
496                 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
497                 /* Notify about congestion when the queue size is 32 KB */
498                 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
499                 /* Notify that the queue is not congested when the data in
500                  * the queue is below this thershold.
501                  */
502                 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
503                 cong_notif_cfg.message_ctx = 0;
504                 cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
505                 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
506                 cong_notif_cfg.notification_mode =
507                                          DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
508                                          DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
509                                          DPNI_CONG_OPT_COHERENT_WRITE;
510
511                 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
512                                                        priv->token,
513                                                        DPNI_QUEUE_TX,
514                                                        tc_id,
515                                                        &cong_notif_cfg);
516                 if (ret) {
517                         PMD_INIT_LOG(ERR,
518                            "Error in setting tx congestion notification: = %d",
519                            -ret);
520                         return -ret;
521                 }
522         }
523         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
524         return 0;
525 }
526
/* RX queue teardown callback. Intentionally a no-op: the queue memory
 * is owned by the contiguous array allocated in dpaa2_alloc_rx_tx_queues()
 * and is released with the device, not per queue.
 */
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
532
/* TX queue teardown callback. Intentionally a no-op: per-queue
 * resources (e.g. cscn buffers) are released in dpaa2_dev_close().
 */
static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
538
539 static const uint32_t *
540 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
541 {
542         static const uint32_t ptypes[] = {
543                 /*todo -= add more types */
544                 RTE_PTYPE_L2_ETHER,
545                 RTE_PTYPE_L3_IPV4,
546                 RTE_PTYPE_L3_IPV4_EXT,
547                 RTE_PTYPE_L3_IPV6,
548                 RTE_PTYPE_L3_IPV6_EXT,
549                 RTE_PTYPE_L4_TCP,
550                 RTE_PTYPE_L4_UDP,
551                 RTE_PTYPE_L4_SCTP,
552                 RTE_PTYPE_L4_ICMP,
553                 RTE_PTYPE_UNKNOWN
554         };
555
556         if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
557                 return ptypes;
558         return NULL;
559 }
560
561 static int
562 dpaa2_dev_start(struct rte_eth_dev *dev)
563 {
564         struct rte_eth_dev_data *data = dev->data;
565         struct dpaa2_dev_priv *priv = data->dev_private;
566         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
567         struct dpni_queue cfg;
568         struct dpni_error_cfg   err_cfg;
569         uint16_t qdid;
570         struct dpni_queue_id qid;
571         struct dpaa2_queue *dpaa2_q;
572         int ret, i;
573
574         PMD_INIT_FUNC_TRACE();
575
576         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
577         if (ret) {
578                 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
579                              ret, priv->hw_id);
580                 return ret;
581         }
582
583         /* Power up the phy. Needed to make the link go Up */
584         dpaa2_dev_set_link_up(dev);
585
586         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
587                             DPNI_QUEUE_TX, &qdid);
588         if (ret) {
589                 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
590                 return ret;
591         }
592         priv->qdid = qdid;
593
594         for (i = 0; i < data->nb_rx_queues; i++) {
595                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
596                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
597                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
598                                        dpaa2_q->flow_id, &cfg, &qid);
599                 if (ret) {
600                         PMD_INIT_LOG(ERR, "Error to get flow "
601                                      "information Error code = %d\n", ret);
602                         return ret;
603                 }
604                 dpaa2_q->fqid = qid.fqid;
605         }
606
607         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
608                                DPNI_OFF_RX_L3_CSUM, true);
609         if (ret) {
610                 PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
611                 return ret;
612         }
613
614         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
615                                DPNI_OFF_RX_L4_CSUM, true);
616         if (ret) {
617                 PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
618                 return ret;
619         }
620
621         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
622                                DPNI_OFF_TX_L3_CSUM, true);
623         if (ret) {
624                 PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
625                 return ret;
626         }
627
628         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
629                                DPNI_OFF_TX_L4_CSUM, true);
630         if (ret) {
631                 PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
632                 return ret;
633         }
634
635         /*checksum errors, send them to normal path and set it in annotation */
636         err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
637
638         err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
639         err_cfg.set_frame_annotation = true;
640
641         ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
642                                        priv->token, &err_cfg);
643         if (ret) {
644                 PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
645                              "code = %d\n", ret);
646                 return ret;
647         }
648         /* VLAN Offload Settings */
649         if (priv->max_vlan_filters)
650                 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
651
652         return 0;
653 }
654
655 /**
656  *  This routine disables all traffic on the adapter by issuing a
657  *  global reset on the MAC.
658  */
659 static void
660 dpaa2_dev_stop(struct rte_eth_dev *dev)
661 {
662         struct dpaa2_dev_priv *priv = dev->data->dev_private;
663         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
664         int ret;
665         struct rte_eth_link link;
666
667         PMD_INIT_FUNC_TRACE();
668
669         dpaa2_dev_set_link_down(dev);
670
671         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
672         if (ret) {
673                 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
674                              ret, priv->hw_id);
675                 return;
676         }
677
678         /* clear the recorded link status */
679         memset(&link, 0, sizeof(link));
680         dpaa2_dev_atomic_write_link_status(dev, &link);
681 }
682
683 static void
684 dpaa2_dev_close(struct rte_eth_dev *dev)
685 {
686         struct rte_eth_dev_data *data = dev->data;
687         struct dpaa2_dev_priv *priv = dev->data->dev_private;
688         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
689         int i, ret;
690         struct rte_eth_link link;
691         struct dpaa2_queue *dpaa2_q;
692
693         PMD_INIT_FUNC_TRACE();
694
695         for (i = 0; i < data->nb_tx_queues; i++) {
696                 dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
697                 if (!dpaa2_q->cscn) {
698                         rte_free(dpaa2_q->cscn);
699                         dpaa2_q->cscn = NULL;
700                 }
701         }
702
703         /* Clean the device first */
704         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
705         if (ret) {
706                 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
707                              " error code %d\n", ret);
708                 return;
709         }
710
711         memset(&link, 0, sizeof(link));
712         dpaa2_dev_atomic_write_link_status(dev, &link);
713 }
714
715 static void
716 dpaa2_dev_promiscuous_enable(
717                 struct rte_eth_dev *dev)
718 {
719         int ret;
720         struct dpaa2_dev_priv *priv = dev->data->dev_private;
721         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
722
723         PMD_INIT_FUNC_TRACE();
724
725         if (dpni == NULL) {
726                 RTE_LOG(ERR, PMD, "dpni is NULL");
727                 return;
728         }
729
730         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
731         if (ret < 0)
732                 RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d", ret);
733
734         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
735         if (ret < 0)
736                 RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d", ret);
737 }
738
739 static void
740 dpaa2_dev_promiscuous_disable(
741                 struct rte_eth_dev *dev)
742 {
743         int ret;
744         struct dpaa2_dev_priv *priv = dev->data->dev_private;
745         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
746
747         PMD_INIT_FUNC_TRACE();
748
749         if (dpni == NULL) {
750                 RTE_LOG(ERR, PMD, "dpni is NULL");
751                 return;
752         }
753
754         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
755         if (ret < 0)
756                 RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d", ret);
757
758         if (dev->data->all_multicast == 0) {
759                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
760                                                  priv->token, false);
761                 if (ret < 0)
762                         RTE_LOG(ERR, PMD, "Unable to disable M promisc mode %d",
763                                 ret);
764         }
765 }
766
767 static void
768 dpaa2_dev_allmulticast_enable(
769                 struct rte_eth_dev *dev)
770 {
771         int ret;
772         struct dpaa2_dev_priv *priv = dev->data->dev_private;
773         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
774
775         PMD_INIT_FUNC_TRACE();
776
777         if (dpni == NULL) {
778                 RTE_LOG(ERR, PMD, "dpni is NULL");
779                 return;
780         }
781
782         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
783         if (ret < 0)
784                 RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d", ret);
785 }
786
787 static void
788 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
789 {
790         int ret;
791         struct dpaa2_dev_priv *priv = dev->data->dev_private;
792         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
793
794         PMD_INIT_FUNC_TRACE();
795
796         if (dpni == NULL) {
797                 RTE_LOG(ERR, PMD, "dpni is NULL");
798                 return;
799         }
800
801         /* must remain on for all promiscuous */
802         if (dev->data->promiscuous == 1)
803                 return;
804
805         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
806         if (ret < 0)
807                 RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d", ret);
808 }
809
810 static int
811 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
812 {
813         int ret;
814         struct dpaa2_dev_priv *priv = dev->data->dev_private;
815         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
816         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
817
818         PMD_INIT_FUNC_TRACE();
819
820         if (dpni == NULL) {
821                 RTE_LOG(ERR, PMD, "dpni is NULL");
822                 return -EINVAL;
823         }
824
825         /* check that mtu is within the allowed range */
826         if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
827                 return -EINVAL;
828
829         if (frame_size > ETHER_MAX_LEN)
830                 dev->data->dev_conf.rxmode.jumbo_frame = 1;
831         else
832                 dev->data->dev_conf.rxmode.jumbo_frame = 0;
833
834         /* Set the Max Rx frame length as 'mtu' +
835          * Maximum Ethernet header length
836          */
837         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
838                                         mtu + ETH_VLAN_HLEN);
839         if (ret) {
840                 PMD_DRV_LOG(ERR, "setting the max frame length failed");
841                 return -1;
842         }
843         PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
844         return 0;
845 }
846
847 static int
848 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
849                        struct ether_addr *addr,
850                        __rte_unused uint32_t index,
851                        __rte_unused uint32_t pool)
852 {
853         int ret;
854         struct dpaa2_dev_priv *priv = dev->data->dev_private;
855         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
856
857         PMD_INIT_FUNC_TRACE();
858
859         if (dpni == NULL) {
860                 RTE_LOG(ERR, PMD, "dpni is NULL");
861                 return -1;
862         }
863
864         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
865                                 priv->token, addr->addr_bytes);
866         if (ret)
867                 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
868                         " err = %d", ret);
869         return 0;
870 }
871
872 static void
873 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
874                           uint32_t index)
875 {
876         int ret;
877         struct dpaa2_dev_priv *priv = dev->data->dev_private;
878         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
879         struct rte_eth_dev_data *data = dev->data;
880         struct ether_addr *macaddr;
881
882         PMD_INIT_FUNC_TRACE();
883
884         macaddr = &data->mac_addrs[index];
885
886         if (dpni == NULL) {
887                 RTE_LOG(ERR, PMD, "dpni is NULL");
888                 return;
889         }
890
891         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
892                                    priv->token, macaddr->addr_bytes);
893         if (ret)
894                 RTE_LOG(ERR, PMD, "error: Removing the MAC ADDR failed:"
895                         " err = %d", ret);
896 }
897
898 static void
899 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
900                        struct ether_addr *addr)
901 {
902         int ret;
903         struct dpaa2_dev_priv *priv = dev->data->dev_private;
904         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
905
906         PMD_INIT_FUNC_TRACE();
907
908         if (dpni == NULL) {
909                 RTE_LOG(ERR, PMD, "dpni is NULL");
910                 return;
911         }
912
913         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
914                                         priv->token, addr->addr_bytes);
915
916         if (ret)
917                 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
918 }
919 static
920 void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
921                          struct rte_eth_stats *stats)
922 {
923         struct dpaa2_dev_priv *priv = dev->data->dev_private;
924         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
925         int32_t  retcode;
926         uint8_t page0 = 0, page1 = 1, page2 = 2;
927         union dpni_statistics value;
928
929         memset(&value, 0, sizeof(union dpni_statistics));
930
931         PMD_INIT_FUNC_TRACE();
932
933         if (!dpni) {
934                 RTE_LOG(ERR, PMD, "dpni is NULL");
935                 return;
936         }
937
938         if (!stats) {
939                 RTE_LOG(ERR, PMD, "stats is NULL");
940                 return;
941         }
942
943         /*Get Counters from page_0*/
944         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
945                                       page0, &value);
946         if (retcode)
947                 goto err;
948
949         stats->ipackets = value.page_0.ingress_all_frames;
950         stats->ibytes = value.page_0.ingress_all_bytes;
951
952         /*Get Counters from page_1*/
953         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
954                                       page1, &value);
955         if (retcode)
956                 goto err;
957
958         stats->opackets = value.page_1.egress_all_frames;
959         stats->obytes = value.page_1.egress_all_bytes;
960
961         /*Get Counters from page_2*/
962         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
963                                       page2, &value);
964         if (retcode)
965                 goto err;
966
967         /* Ingress drop frame count due to configured rules */
968         stats->ierrors = value.page_2.ingress_filtered_frames;
969         /* Ingress drop frame count due to error */
970         stats->ierrors += value.page_2.ingress_discarded_frames;
971
972         stats->oerrors = value.page_2.egress_discarded_frames;
973         stats->imissed = value.page_2.ingress_nobuffer_discards;
974
975         return;
976
977 err:
978         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
979         return;
980 };
981
982 static
983 void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
984 {
985         struct dpaa2_dev_priv *priv = dev->data->dev_private;
986         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
987         int32_t  retcode;
988
989         PMD_INIT_FUNC_TRACE();
990
991         if (dpni == NULL) {
992                 RTE_LOG(ERR, PMD, "dpni is NULL");
993                 return;
994         }
995
996         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
997         if (retcode)
998                 goto error;
999
1000         return;
1001
1002 error:
1003         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
1004         return;
1005 };
1006
1007 /* return 0 means link status changed, -1 means not changed */
1008 static int
1009 dpaa2_dev_link_update(struct rte_eth_dev *dev,
1010                         int wait_to_complete __rte_unused)
1011 {
1012         int ret;
1013         struct dpaa2_dev_priv *priv = dev->data->dev_private;
1014         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1015         struct rte_eth_link link, old;
1016         struct dpni_link_state state = {0};
1017
1018         PMD_INIT_FUNC_TRACE();
1019
1020         if (dpni == NULL) {
1021                 RTE_LOG(ERR, PMD, "error : dpni is NULL");
1022                 return 0;
1023         }
1024         memset(&old, 0, sizeof(old));
1025         dpaa2_dev_atomic_read_link_status(dev, &old);
1026
1027         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1028         if (ret < 0) {
1029                 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
1030                 return -1;
1031         }
1032
1033         if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
1034                 RTE_LOG(DEBUG, PMD, "No change in status\n");
1035                 return -1;
1036         }
1037
1038         memset(&link, 0, sizeof(struct rte_eth_link));
1039         link.link_status = state.up;
1040         link.link_speed = state.rate;
1041
1042         if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
1043                 link.link_duplex = ETH_LINK_HALF_DUPLEX;
1044         else
1045                 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1046
1047         dpaa2_dev_atomic_write_link_status(dev, &link);
1048
1049         if (link.link_status)
1050                 PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
1051         else
1052                 PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
1053         return 0;
1054 }
1055
1056 /**
1057  * Toggle the DPNI to enable, if not already enabled.
1058  * This is not strictly PHY up/down - it is more of logical toggling.
1059  */
1060 static int
1061 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1062 {
1063         int ret = -EINVAL;
1064         struct dpaa2_dev_priv *priv;
1065         struct fsl_mc_io *dpni;
1066         int en = 0;
1067
1068         PMD_INIT_FUNC_TRACE();
1069
1070         priv = dev->data->dev_private;
1071         dpni = (struct fsl_mc_io *)priv->hw;
1072
1073         if (dpni == NULL) {
1074                 RTE_LOG(ERR, PMD, "Device has not yet been configured");
1075                 return ret;
1076         }
1077
1078         /* Check if DPNI is currently enabled */
1079         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1080         if (ret) {
1081                 /* Unable to obtain dpni status; Not continuing */
1082                 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1083                 return -EINVAL;
1084         }
1085
1086         /* Enable link if not already enabled */
1087         if (!en) {
1088                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1089                 if (ret) {
1090                         PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1091                         return -EINVAL;
1092                 }
1093         }
1094         /* changing tx burst function to start enqueues */
1095         dev->tx_pkt_burst = dpaa2_dev_tx;
1096         dev->data->dev_link.link_status = 1;
1097
1098         PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
1099         return ret;
1100 }
1101
1102 /**
1103  * Toggle the DPNI to disable, if not already disabled.
1104  * This is not strictly PHY up/down - it is more of logical toggling.
1105  */
1106 static int
1107 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1108 {
1109         int ret = -EINVAL;
1110         struct dpaa2_dev_priv *priv;
1111         struct fsl_mc_io *dpni;
1112         int dpni_enabled = 0;
1113         int retries = 10;
1114
1115         PMD_INIT_FUNC_TRACE();
1116
1117         priv = dev->data->dev_private;
1118         dpni = (struct fsl_mc_io *)priv->hw;
1119
1120         if (dpni == NULL) {
1121                 RTE_LOG(ERR, PMD, "Device has not yet been configured");
1122                 return ret;
1123         }
1124
1125         /*changing  tx burst function to avoid any more enqueues */
1126         dev->tx_pkt_burst = dummy_dev_tx;
1127
1128         /* Loop while dpni_disable() attempts to drain the egress FQs
1129          * and confirm them back to us.
1130          */
1131         do {
1132                 ret = dpni_disable(dpni, 0, priv->token);
1133                 if (ret) {
1134                         PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
1135                         return ret;
1136                 }
1137                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1138                 if (ret) {
1139                         PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
1140                         return ret;
1141                 }
1142                 if (dpni_enabled)
1143                         /* Allow the MC some slack */
1144                         rte_delay_us(100 * 1000);
1145         } while (dpni_enabled && --retries);
1146
1147         if (!retries) {
1148                 PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
1149                 /* todo- we may have to manually cleanup queues.
1150                  */
1151         } else {
1152                 PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
1153                             dev->data->port_id);
1154         }
1155
1156         dev->data->dev_link.link_status = 0;
1157
1158         return ret;
1159 }
1160
1161 static int
1162 dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1163 {
1164         int ret = -EINVAL;
1165         struct dpaa2_dev_priv *priv;
1166         struct fsl_mc_io *dpni;
1167         struct dpni_link_state state = {0};
1168
1169         PMD_INIT_FUNC_TRACE();
1170
1171         priv = dev->data->dev_private;
1172         dpni = (struct fsl_mc_io *)priv->hw;
1173
1174         if (dpni == NULL || fc_conf == NULL) {
1175                 RTE_LOG(ERR, PMD, "device not configured");
1176                 return ret;
1177         }
1178
1179         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1180         if (ret) {
1181                 RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
1182                 return ret;
1183         }
1184
1185         memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1186         if (state.options & DPNI_LINK_OPT_PAUSE) {
1187                 /* DPNI_LINK_OPT_PAUSE set
1188                  *  if ASYM_PAUSE not set,
1189                  *      RX Side flow control (handle received Pause frame)
1190                  *      TX side flow control (send Pause frame)
1191                  *  if ASYM_PAUSE set,
1192                  *      RX Side flow control (handle received Pause frame)
1193                  *      No TX side flow control (send Pause frame disabled)
1194                  */
1195                 if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
1196                         fc_conf->mode = RTE_FC_FULL;
1197                 else
1198                         fc_conf->mode = RTE_FC_RX_PAUSE;
1199         } else {
1200                 /* DPNI_LINK_OPT_PAUSE not set
1201                  *  if ASYM_PAUSE set,
1202                  *      TX side flow control (send Pause frame)
1203                  *      No RX side flow control (No action on pause frame rx)
1204                  *  if ASYM_PAUSE not set,
1205                  *      Flow control disabled
1206                  */
1207                 if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
1208                         fc_conf->mode = RTE_FC_TX_PAUSE;
1209                 else
1210                         fc_conf->mode = RTE_FC_NONE;
1211         }
1212
1213         return ret;
1214 }
1215
1216 static int
1217 dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1218 {
1219         int ret = -EINVAL;
1220         struct dpaa2_dev_priv *priv;
1221         struct fsl_mc_io *dpni;
1222         struct dpni_link_state state = {0};
1223         struct dpni_link_cfg cfg = {0};
1224
1225         PMD_INIT_FUNC_TRACE();
1226
1227         priv = dev->data->dev_private;
1228         dpni = (struct fsl_mc_io *)priv->hw;
1229
1230         if (dpni == NULL) {
1231                 RTE_LOG(ERR, PMD, "dpni is NULL");
1232                 return ret;
1233         }
1234
1235         /* It is necessary to obtain the current state before setting fc_conf
1236          * as MC would return error in case rate, autoneg or duplex values are
1237          * different.
1238          */
1239         ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
1240         if (ret) {
1241                 RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)", ret);
1242                 return -1;
1243         }
1244
1245         /* Disable link before setting configuration */
1246         dpaa2_dev_set_link_down(dev);
1247
1248         /* Based on fc_conf, update cfg */
1249         cfg.rate = state.rate;
1250         cfg.options = state.options;
1251
1252         /* update cfg with fc_conf */
1253         switch (fc_conf->mode) {
1254         case RTE_FC_FULL:
1255                 /* Full flow control;
1256                  * OPT_PAUSE set, ASYM_PAUSE not set
1257                  */
1258                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1259                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1260         case RTE_FC_TX_PAUSE:
1261                 /* Enable RX flow control
1262                  * OPT_PAUSE not set;
1263                  * ASYM_PAUSE set;
1264                  */
1265                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1266                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1267                 break;
1268         case RTE_FC_RX_PAUSE:
1269                 /* Enable TX Flow control
1270                  * OPT_PAUSE set
1271                  * ASYM_PAUSE set
1272                  */
1273                 cfg.options |= DPNI_LINK_OPT_PAUSE;
1274                 cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
1275                 break;
1276         case RTE_FC_NONE:
1277                 /* Disable Flow control
1278                  * OPT_PAUSE not set
1279                  * ASYM_PAUSE not set
1280                  */
1281                 cfg.options &= ~DPNI_LINK_OPT_PAUSE;
1282                 cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
1283                 break;
1284         default:
1285                 RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)",
1286                         fc_conf->mode);
1287                 return -1;
1288         }
1289
1290         ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
1291         if (ret)
1292                 RTE_LOG(ERR, PMD, "Unable to set Link configuration (err=%d)",
1293                         ret);
1294
1295         /* Enable link */
1296         dpaa2_dev_set_link_up(dev);
1297
1298         return ret;
1299 }
1300
/* ethdev operations exposed by the DPAA2 PMD; installed on the device
 * in dpaa2_dev_init(). Ops not listed here (e.g. xstats) are not
 * supported by this driver version.
 */
static struct eth_dev_ops dpaa2_ethdev_ops = {
	.dev_configure    = dpaa2_eth_dev_configure,
	.dev_start            = dpaa2_dev_start,
	.dev_stop             = dpaa2_dev_stop,
	.dev_close            = dpaa2_dev_close,
	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	.dev_set_link_up      = dpaa2_dev_set_link_up,
	.dev_set_link_down    = dpaa2_dev_set_link_down,
	.link_update       = dpaa2_dev_link_update,
	.stats_get             = dpaa2_dev_stats_get,
	.stats_reset       = dpaa2_dev_stats_reset,
	.fw_version_get    = dpaa2_fw_version_get,
	.dev_infos_get     = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set           = dpaa2_dev_mtu_set,
	.vlan_filter_set      = dpaa2_vlan_filter_set,
	.vlan_offload_set     = dpaa2_vlan_offload_set,
	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
	.rx_queue_release  = dpaa2_dev_rx_queue_release,
	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
	.tx_queue_release  = dpaa2_dev_tx_queue_release,
	.flow_ctrl_get        = dpaa2_flow_ctrl_get,
	.flow_ctrl_set        = dpaa2_flow_ctrl_set,
	.mac_addr_add         = dpaa2_dev_add_mac_addr,
	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
	.mac_addr_set         = dpaa2_dev_set_mac_addr,
};
1331
/* Initialise a DPNI-backed ethdev: open and reset the DPNI object,
 * read its attributes, size the Rx/Tx queue arrays, program the Tx
 * buffer layouts and install the driver ops and burst functions.
 * Returns 0 on success, negative on failure; on the error path all
 * acquired resources are released via dpaa2_dev_uninit().
 */
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int i, ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	/* MC object id of the DPNI instance to open */
	hw_id = dpaa2_dev->object_id;

	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	/* All MC commands for this DPNI go through the first MC portal */
	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_tc = attr.num_tcs;
	/* NOTE(review): only index 0 is ever written because of the
	 * unconditional break — confirm per-TC distribution sizes are
	 * intentionally limited to the first TC here.
	 */
	for (i = 0; i < attr.num_tcs; i++) {
		priv->num_dist_per_tc[i] = attr.num_queues;
		break;
	}

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	/* One Tx queue per queue for a single TC, else one per TC */
	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	/* Cache hardware handle and limits in the private data */
	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	/* Tx congestion-group based flow control is always enabled */
	priv->flags |= DPAA2_TX_CGR_SUPPORT;
	PMD_INIT_LOG(INFO, "Enable the tx congestion control support");

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	/* Seed slot 0 with the hardware's primary MAC address */
	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout: request frame-status annotation ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	/* Install driver ops and burst functions */
	eth_dev->dev_ops = &dpaa2_ethdev_ops;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	/* Map allocated memory for DMA via the VFIO layer */
	rte_fslmc_vfio_dmamap();

	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}
1477
/* Tear down an ethdev initialised by dpaa2_dev_init(): close the
 * device, free queue storage and the MAC address table, close the
 * DPNI object at the MC and clear the driver hooks.
 * Returns 0 on success, -EPERM in secondary processes, -1 if the
 * device was never initialised (or already uninitialised).
 */
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process may release shared resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* priv->hw is cleared at the end of this function, making
	 * a second call a safe no-op.
	 */
	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/* all queues were allocated as one chunk anchored at
		 * rx_vq[0] (see dpaa2_alloc_rx_tx_queues), so a single
		 * free releases them all
		 */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer*/
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni*/
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
1534
1535 static int
1536 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
1537                 struct rte_dpaa2_device *dpaa2_dev)
1538 {
1539         struct rte_eth_dev *eth_dev;
1540         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
1541
1542         int diag;
1543
1544         sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
1545
1546         eth_dev = rte_eth_dev_allocate(ethdev_name);
1547         if (eth_dev == NULL)
1548                 return -ENOMEM;
1549
1550         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1551                 eth_dev->data->dev_private = rte_zmalloc(
1552                                                 "ethdev private structure",
1553                                                 sizeof(struct dpaa2_dev_priv),
1554                                                 RTE_CACHE_LINE_SIZE);
1555                 if (eth_dev->data->dev_private == NULL) {
1556                         PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
1557                                      " private port data\n");
1558                         rte_eth_dev_release_port(eth_dev);
1559                         return -ENOMEM;
1560                 }
1561         }
1562         eth_dev->device = &dpaa2_dev->device;
1563         eth_dev->device->driver = &dpaa2_drv->driver;
1564
1565         dpaa2_dev->eth_dev = eth_dev;
1566         eth_dev->data->rx_mbuf_alloc_failed = 0;
1567
1568         /* Invoke PMD device initialization function */
1569         diag = dpaa2_dev_init(eth_dev);
1570         if (diag == 0)
1571                 return 0;
1572
1573         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1574                 rte_free(eth_dev->data->dev_private);
1575         rte_eth_dev_release_port(eth_dev);
1576         return diag;
1577 }
1578
1579 static int
1580 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
1581 {
1582         struct rte_eth_dev *eth_dev;
1583
1584         eth_dev = dpaa2_dev->eth_dev;
1585         dpaa2_dev_uninit(eth_dev);
1586
1587         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1588                 rte_free(eth_dev->data->dev_private);
1589         rte_eth_dev_release_port(eth_dev);
1590
1591         return 0;
1592 }
1593
/* fslmc bus driver descriptor: matches MC DPNI objects and binds them
 * to this PMD via the probe/remove callbacks above.
 */
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);