net/dpaa2: add link status config support
[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright (c) 2016 NXP. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_ethdev.h>
46 #include <rte_fslmc.h>
47
48 #include <fslmc_logs.h>
49 #include <fslmc_vfio.h>
50 #include <dpaa2_hw_pvt.h>
51 #include <dpaa2_hw_mempool.h>
52 #include <dpaa2_hw_dpio.h>
53
54 #include "dpaa2_ethdev.h"
55
56 static struct rte_dpaa2_driver rte_dpaa2_pmd;
57 static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev);
58 static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
59 static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
60
61 /**
62  * Atomically reads the link status information from global
63  * structure rte_eth_dev.
64  *
65  * @param dev
66  *   - Pointer to the structure rte_eth_dev to read from.
67  *   - Pointer to the buffer to be saved with the link status.
68  *
69  * @return
70  *   - On success, zero.
71  *   - On failure, negative value.
72  */
73 static inline int
74 dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
75                                   struct rte_eth_link *link)
76 {
77         struct rte_eth_link *dst = link;
78         struct rte_eth_link *src = &dev->data->dev_link;
79
80         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
81                                 *(uint64_t *)src) == 0)
82                 return -1;
83
84         return 0;
85 }
86
87 /**
88  * Atomically writes the link status information into global
89  * structure rte_eth_dev.
90  *
91  * @param dev
92  *   - Pointer to the structure rte_eth_dev to read from.
93  *   - Pointer to the buffer to be saved with the link status.
94  *
95  * @return
96  *   - On success, zero.
97  *   - On failure, negative value.
98  */
99 static inline int
100 dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
101                                    struct rte_eth_link *link)
102 {
103         struct rte_eth_link *dst = &dev->data->dev_link;
104         struct rte_eth_link *src = link;
105
106         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
107                                 *(uint64_t *)src) == 0)
108                 return -1;
109
110         return 0;
111 }
112
113 static int
114 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
115 {
116         int ret;
117         struct dpaa2_dev_priv *priv = dev->data->dev_private;
118         struct fsl_mc_io *dpni = priv->hw;
119
120         PMD_INIT_FUNC_TRACE();
121
122         if (dpni == NULL) {
123                 RTE_LOG(ERR, PMD, "dpni is NULL");
124                 return -1;
125         }
126
127         if (on)
128                 ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
129                                        priv->token, vlan_id);
130         else
131                 ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
132                                           priv->token, vlan_id);
133
134         if (ret < 0)
135                 PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
136                             ret, vlan_id, priv->hw_id);
137
138         return ret;
139 }
140
141 static void
142 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
143 {
144         struct dpaa2_dev_priv *priv = dev->data->dev_private;
145         struct fsl_mc_io *dpni = priv->hw;
146         int ret;
147
148         PMD_INIT_FUNC_TRACE();
149
150         if (mask & ETH_VLAN_FILTER_MASK) {
151                 if (dev->data->dev_conf.rxmode.hw_vlan_filter)
152                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
153                                                       priv->token, true);
154                 else
155                         ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
156                                                       priv->token, false);
157                 if (ret < 0)
158                         RTE_LOG(ERR, PMD, "Unable to set vlan filter ret = %d",
159                                 ret);
160         }
161 }
162
163 static void
164 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
165 {
166         struct dpaa2_dev_priv *priv = dev->data->dev_private;
167
168         PMD_INIT_FUNC_TRACE();
169
170         dev_info->if_index = priv->hw_id;
171
172         dev_info->max_mac_addrs = priv->max_mac_filters;
173         dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
174         dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
175         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
176         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
177         dev_info->rx_offload_capa =
178                 DEV_RX_OFFLOAD_IPV4_CKSUM |
179                 DEV_RX_OFFLOAD_UDP_CKSUM |
180                 DEV_RX_OFFLOAD_TCP_CKSUM |
181                 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
182         dev_info->tx_offload_capa =
183                 DEV_TX_OFFLOAD_IPV4_CKSUM |
184                 DEV_TX_OFFLOAD_UDP_CKSUM |
185                 DEV_TX_OFFLOAD_TCP_CKSUM |
186                 DEV_TX_OFFLOAD_SCTP_CKSUM |
187                 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
188         dev_info->speed_capa = ETH_LINK_SPEED_1G |
189                         ETH_LINK_SPEED_2_5G |
190                         ETH_LINK_SPEED_10G;
191 }
192
193 static int
194 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
195 {
196         struct dpaa2_dev_priv *priv = dev->data->dev_private;
197         uint16_t dist_idx;
198         uint32_t vq_id;
199         struct dpaa2_queue *mc_q, *mcq;
200         uint32_t tot_queues;
201         int i;
202         struct dpaa2_queue *dpaa2_q;
203
204         PMD_INIT_FUNC_TRACE();
205
206         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
207         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
208                           RTE_CACHE_LINE_SIZE);
209         if (!mc_q) {
210                 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
211                 return -1;
212         }
213
214         for (i = 0; i < priv->nb_rx_queues; i++) {
215                 mc_q->dev = dev;
216                 priv->rx_vq[i] = mc_q++;
217                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
218                 dpaa2_q->q_storage = rte_malloc("dq_storage",
219                                         sizeof(struct queue_storage_info_t),
220                                         RTE_CACHE_LINE_SIZE);
221                 if (!dpaa2_q->q_storage)
222                         goto fail;
223
224                 memset(dpaa2_q->q_storage, 0,
225                        sizeof(struct queue_storage_info_t));
226                 if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
227                         goto fail;
228         }
229
230         for (i = 0; i < priv->nb_tx_queues; i++) {
231                 mc_q->dev = dev;
232                 mc_q->flow_id = 0xffff;
233                 priv->tx_vq[i] = mc_q++;
234                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
235                 dpaa2_q->cscn = rte_malloc(NULL,
236                                            sizeof(struct qbman_result), 16);
237                 if (!dpaa2_q->cscn)
238                         goto fail_tx;
239         }
240
241         vq_id = 0;
242         for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
243              dist_idx++) {
244                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
245                 mcq->tc_index = DPAA2_DEF_TC;
246                 mcq->flow_id = dist_idx;
247                 vq_id++;
248         }
249
250         return 0;
251 fail_tx:
252         i -= 1;
253         while (i >= 0) {
254                 dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
255                 rte_free(dpaa2_q->cscn);
256                 priv->tx_vq[i--] = NULL;
257         }
258         i = priv->nb_rx_queues;
259 fail:
260         i -= 1;
261         mc_q = priv->rx_vq[0];
262         while (i >= 0) {
263                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
264                 dpaa2_free_dq_storage(dpaa2_q->q_storage);
265                 rte_free(dpaa2_q->q_storage);
266                 priv->rx_vq[i--] = NULL;
267         }
268         rte_free(mc_q);
269         return -1;
270 }
271
272 static int
273 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
274 {
275         struct rte_eth_dev_data *data = dev->data;
276         struct rte_eth_conf *eth_conf = &data->dev_conf;
277         int ret;
278
279         PMD_INIT_FUNC_TRACE();
280
281         /* Check for correct configuration */
282         if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
283             data->nb_rx_queues > 1) {
284                 PMD_INIT_LOG(ERR, "Distribution is not enabled, "
285                             "but Rx queues more than 1\n");
286                 return -1;
287         }
288
289         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
290                 /* Return in case number of Rx queues is 1 */
291                 if (data->nb_rx_queues == 1)
292                         return 0;
293                 ret = dpaa2_setup_flow_dist(dev,
294                                 eth_conf->rx_adv_conf.rss_conf.rss_hf);
295                 if (ret) {
296                         PMD_INIT_LOG(ERR, "unable to set flow distribution."
297                                      "please check queue config\n");
298                         return ret;
299                 }
300         }
301         return 0;
302 }
303
/* Function to setup RX flow information. It contains traffic class ID,
 * flow ID, destination configuration etc.
 *
 * Attaches the buffer pool backing 'mb_pool' to the device (if not
 * already attached), programs the (tc, flow) Rx queue in the MC with
 * the software queue as user context, optionally enables FLC stashing
 * on newer QBMan revisions, and sets a per-queue byte-based taildrop
 * unless DPAA2_RX_TAILDROP_OFF is set.
 */
static int
dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t rx_queue_id,
                         uint16_t nb_rx_desc __rte_unused,
                         unsigned int socket_id __rte_unused,
                         const struct rte_eth_rxconf *rx_conf __rte_unused,
                         struct rte_mempool *mb_pool)
{
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
        struct dpaa2_queue *dpaa2_q;
        struct dpni_queue cfg;
        uint8_t options = 0;
        uint8_t flow_id;
        uint32_t bpid;
        int ret;

        PMD_INIT_FUNC_TRACE();

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
                     dev, rx_queue_id, mb_pool, rx_conf);

        /* Attach the mempool's hardware buffer pool to this DPNI if the
         * device is not already using it.
         */
        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
                bpid = mempool_to_bpid(mb_pool);
                ret = dpaa2_attach_bp_list(priv,
                                           rte_dpaa2_bpid_info[bpid].bp_list);
                if (ret)
                        return ret;
        }
        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
        dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */

        /*Get the tc id and flow id from given VQ id*/
        flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
        memset(&cfg, 0, sizeof(struct dpni_queue));

        /* Store the software queue pointer as user context so the Rx
         * burst routine can recover it from the dequeue result.
         */
        options = options | DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (uint64_t)(dpaa2_q);

        /*if ls2088 or rev2 device, enable the stashing */
        if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
                options |= DPNI_QUEUE_OPT_FLC;
                cfg.flc.stash_control = true;
                cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
                /* 00 00 00 - last 6 bit represent annotation, context stashing,
                 * data stashing setting 01 01 00 (0x14) to enable
                 * 1 line data, 1 line annotation
                 */
                cfg.flc.value |= 0x14;
        }
        ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
                             dpaa2_q->tc_index, flow_id, options, &cfg);
        if (ret) {
                PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
                return -1;
        }

        /* Per-queue taildrop keeps a congested queue from consuming all
         * buffers; threshold is in bytes (DPNI_CONGESTION_UNIT_BYTES).
         */
        if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) {
                struct dpni_taildrop taildrop;

                taildrop.enable = 1;
                /*enabling per rx queue congestion control */
                taildrop.threshold = CONG_THRESHOLD_RX_Q;
                taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
                PMD_INIT_LOG(DEBUG, "Enabling Early Drop on queue = %d",
                             rx_queue_id);
                ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
                                        DPNI_CP_QUEUE, DPNI_QUEUE_RX,
                                        dpaa2_q->tc_index, flow_id, &taildrop);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Error in setting the rx flow"
                                     " err : = %d\n", ret);
                        return -1;
                }
        }

        dev->data->rx_queues[rx_queue_id] = dpaa2_q;
        return 0;
}
386
387 static int
388 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
389                          uint16_t tx_queue_id,
390                          uint16_t nb_tx_desc __rte_unused,
391                          unsigned int socket_id __rte_unused,
392                          const struct rte_eth_txconf *tx_conf __rte_unused)
393 {
394         struct dpaa2_dev_priv *priv = dev->data->dev_private;
395         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
396                 priv->tx_vq[tx_queue_id];
397         struct fsl_mc_io *dpni = priv->hw;
398         struct dpni_queue tx_conf_cfg;
399         struct dpni_queue tx_flow_cfg;
400         uint8_t options = 0, flow_id;
401         uint32_t tc_id;
402         int ret;
403
404         PMD_INIT_FUNC_TRACE();
405
406         /* Return if queue already configured */
407         if (dpaa2_q->flow_id != 0xffff)
408                 return 0;
409
410         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
411         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
412
413         if (priv->num_tc == 1) {
414                 tc_id = 0;
415                 flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
416         } else {
417                 tc_id = tx_queue_id;
418                 flow_id = 0;
419         }
420
421         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
422                              tc_id, flow_id, options, &tx_flow_cfg);
423         if (ret) {
424                 PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
425                              "tc_id=%d, flow =%d ErrorCode = %x\n",
426                              tc_id, flow_id, -ret);
427                         return -1;
428         }
429
430         dpaa2_q->flow_id = flow_id;
431
432         if (tx_queue_id == 0) {
433                 /*Set tx-conf and error configuration*/
434                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
435                                                     priv->token,
436                                                     DPNI_CONF_DISABLE);
437                 if (ret) {
438                         PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
439                                      " ErrorCode = %x", ret);
440                         return -1;
441                 }
442         }
443         dpaa2_q->tc_index = tc_id;
444
445         if (priv->flags & DPAA2_TX_CGR_SUPPORT) {
446                 struct dpni_congestion_notification_cfg cong_notif_cfg;
447
448                 cong_notif_cfg.units = DPNI_CONGESTION_UNIT_BYTES;
449                 /* Notify about congestion when the queue size is 32 KB */
450                 cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD;
451                 /* Notify that the queue is not congested when the data in
452                  * the queue is below this thershold.
453                  */
454                 cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
455                 cong_notif_cfg.message_ctx = 0;
456                 cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
457                 cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
458                 cong_notif_cfg.notification_mode =
459                                          DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
460                                          DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
461                                          DPNI_CONG_OPT_COHERENT_WRITE;
462
463                 ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
464                                                        priv->token,
465                                                        DPNI_QUEUE_TX,
466                                                        tc_id,
467                                                        &cong_notif_cfg);
468                 if (ret) {
469                         PMD_INIT_LOG(ERR,
470                            "Error in setting tx congestion notification: = %d",
471                            -ret);
472                         return -ret;
473                 }
474         }
475         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
476         return 0;
477 }
478
/* Rx queue release hook: intentionally a no-op here — per-queue memory
 * is owned by the driver and freed elsewhere (presumably at device
 * close/uninit; not visible in this file chunk).
 */
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}
484
/* Tx queue release hook: intentionally a no-op — the per-queue CSCN
 * memory is released in dpaa2_dev_close(), not per-queue.
 */
static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}
490
491 static const uint32_t *
492 dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
493 {
494         static const uint32_t ptypes[] = {
495                 /*todo -= add more types */
496                 RTE_PTYPE_L2_ETHER,
497                 RTE_PTYPE_L3_IPV4,
498                 RTE_PTYPE_L3_IPV4_EXT,
499                 RTE_PTYPE_L3_IPV6,
500                 RTE_PTYPE_L3_IPV6_EXT,
501                 RTE_PTYPE_L4_TCP,
502                 RTE_PTYPE_L4_UDP,
503                 RTE_PTYPE_L4_SCTP,
504                 RTE_PTYPE_L4_ICMP,
505                 RTE_PTYPE_UNKNOWN
506         };
507
508         if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx)
509                 return ptypes;
510         return NULL;
511 }
512
513 static int
514 dpaa2_dev_start(struct rte_eth_dev *dev)
515 {
516         struct rte_eth_dev_data *data = dev->data;
517         struct dpaa2_dev_priv *priv = data->dev_private;
518         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
519         struct dpni_queue cfg;
520         struct dpni_error_cfg   err_cfg;
521         uint16_t qdid;
522         struct dpni_queue_id qid;
523         struct dpaa2_queue *dpaa2_q;
524         int ret, i;
525
526         PMD_INIT_FUNC_TRACE();
527
528         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
529         if (ret) {
530                 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
531                              ret, priv->hw_id);
532                 return ret;
533         }
534
535         /* Power up the phy. Needed to make the link go Up */
536         dpaa2_dev_set_link_up(dev);
537
538         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
539                             DPNI_QUEUE_TX, &qdid);
540         if (ret) {
541                 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
542                 return ret;
543         }
544         priv->qdid = qdid;
545
546         for (i = 0; i < data->nb_rx_queues; i++) {
547                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
548                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
549                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
550                                        dpaa2_q->flow_id, &cfg, &qid);
551                 if (ret) {
552                         PMD_INIT_LOG(ERR, "Error to get flow "
553                                      "information Error code = %d\n", ret);
554                         return ret;
555                 }
556                 dpaa2_q->fqid = qid.fqid;
557         }
558
559         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
560                                DPNI_OFF_RX_L3_CSUM, true);
561         if (ret) {
562                 PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
563                 return ret;
564         }
565
566         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
567                                DPNI_OFF_RX_L4_CSUM, true);
568         if (ret) {
569                 PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
570                 return ret;
571         }
572
573         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
574                                DPNI_OFF_TX_L3_CSUM, true);
575         if (ret) {
576                 PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
577                 return ret;
578         }
579
580         ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
581                                DPNI_OFF_TX_L4_CSUM, true);
582         if (ret) {
583                 PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
584                 return ret;
585         }
586
587         /*checksum errors, send them to normal path and set it in annotation */
588         err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
589
590         err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
591         err_cfg.set_frame_annotation = true;
592
593         ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
594                                        priv->token, &err_cfg);
595         if (ret) {
596                 PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
597                              "code = %d\n", ret);
598                 return ret;
599         }
600         /* VLAN Offload Settings */
601         if (priv->max_vlan_filters)
602                 dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
603
604         return 0;
605 }
606
607 /**
608  *  This routine disables all traffic on the adapter by issuing a
609  *  global reset on the MAC.
610  */
611 static void
612 dpaa2_dev_stop(struct rte_eth_dev *dev)
613 {
614         struct dpaa2_dev_priv *priv = dev->data->dev_private;
615         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
616         int ret;
617         struct rte_eth_link link;
618
619         PMD_INIT_FUNC_TRACE();
620
621         dpaa2_dev_set_link_down(dev);
622
623         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
624         if (ret) {
625                 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
626                              ret, priv->hw_id);
627                 return;
628         }
629
630         /* clear the recorded link status */
631         memset(&link, 0, sizeof(link));
632         dpaa2_dev_atomic_write_link_status(dev, &link);
633 }
634
635 static void
636 dpaa2_dev_close(struct rte_eth_dev *dev)
637 {
638         struct rte_eth_dev_data *data = dev->data;
639         struct dpaa2_dev_priv *priv = dev->data->dev_private;
640         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
641         int i, ret;
642         struct rte_eth_link link;
643         struct dpaa2_queue *dpaa2_q;
644
645         PMD_INIT_FUNC_TRACE();
646
647         for (i = 0; i < data->nb_tx_queues; i++) {
648                 dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i];
649                 if (!dpaa2_q->cscn) {
650                         rte_free(dpaa2_q->cscn);
651                         dpaa2_q->cscn = NULL;
652                 }
653         }
654
655         /* Clean the device first */
656         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
657         if (ret) {
658                 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
659                              " error code %d\n", ret);
660                 return;
661         }
662
663         memset(&link, 0, sizeof(link));
664         dpaa2_dev_atomic_write_link_status(dev, &link);
665 }
666
667 static void
668 dpaa2_dev_promiscuous_enable(
669                 struct rte_eth_dev *dev)
670 {
671         int ret;
672         struct dpaa2_dev_priv *priv = dev->data->dev_private;
673         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
674
675         PMD_INIT_FUNC_TRACE();
676
677         if (dpni == NULL) {
678                 RTE_LOG(ERR, PMD, "dpni is NULL");
679                 return;
680         }
681
682         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
683         if (ret < 0)
684                 RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d", ret);
685
686         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
687         if (ret < 0)
688                 RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d", ret);
689 }
690
691 static void
692 dpaa2_dev_promiscuous_disable(
693                 struct rte_eth_dev *dev)
694 {
695         int ret;
696         struct dpaa2_dev_priv *priv = dev->data->dev_private;
697         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
698
699         PMD_INIT_FUNC_TRACE();
700
701         if (dpni == NULL) {
702                 RTE_LOG(ERR, PMD, "dpni is NULL");
703                 return;
704         }
705
706         ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
707         if (ret < 0)
708                 RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d", ret);
709
710         if (dev->data->all_multicast == 0) {
711                 ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
712                                                  priv->token, false);
713                 if (ret < 0)
714                         RTE_LOG(ERR, PMD, "Unable to disable M promisc mode %d",
715                                 ret);
716         }
717 }
718
719 static void
720 dpaa2_dev_allmulticast_enable(
721                 struct rte_eth_dev *dev)
722 {
723         int ret;
724         struct dpaa2_dev_priv *priv = dev->data->dev_private;
725         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
726
727         PMD_INIT_FUNC_TRACE();
728
729         if (dpni == NULL) {
730                 RTE_LOG(ERR, PMD, "dpni is NULL");
731                 return;
732         }
733
734         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
735         if (ret < 0)
736                 RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d", ret);
737 }
738
739 static void
740 dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
741 {
742         int ret;
743         struct dpaa2_dev_priv *priv = dev->data->dev_private;
744         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
745
746         PMD_INIT_FUNC_TRACE();
747
748         if (dpni == NULL) {
749                 RTE_LOG(ERR, PMD, "dpni is NULL");
750                 return;
751         }
752
753         /* must remain on for all promiscuous */
754         if (dev->data->promiscuous == 1)
755                 return;
756
757         ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
758         if (ret < 0)
759                 RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d", ret);
760 }
761
762 static int
763 dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
764 {
765         int ret;
766         struct dpaa2_dev_priv *priv = dev->data->dev_private;
767         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
768         uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
769
770         PMD_INIT_FUNC_TRACE();
771
772         if (dpni == NULL) {
773                 RTE_LOG(ERR, PMD, "dpni is NULL");
774                 return -EINVAL;
775         }
776
777         /* check that mtu is within the allowed range */
778         if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
779                 return -EINVAL;
780
781         /* Set the Max Rx frame length as 'mtu' +
782          * Maximum Ethernet header length
783          */
784         ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
785                                         mtu + ETH_VLAN_HLEN);
786         if (ret) {
787                 PMD_DRV_LOG(ERR, "setting the max frame length failed");
788                 return -1;
789         }
790         PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
791         return 0;
792 }
793
794 static int
795 dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
796                        struct ether_addr *addr,
797                        __rte_unused uint32_t index,
798                        __rte_unused uint32_t pool)
799 {
800         int ret;
801         struct dpaa2_dev_priv *priv = dev->data->dev_private;
802         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
803
804         PMD_INIT_FUNC_TRACE();
805
806         if (dpni == NULL) {
807                 RTE_LOG(ERR, PMD, "dpni is NULL");
808                 return -1;
809         }
810
811         ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
812                                 priv->token, addr->addr_bytes);
813         if (ret)
814                 RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
815                         " err = %d", ret);
816         return 0;
817 }
818
819 static void
820 dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
821                           uint32_t index)
822 {
823         int ret;
824         struct dpaa2_dev_priv *priv = dev->data->dev_private;
825         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
826         struct rte_eth_dev_data *data = dev->data;
827         struct ether_addr *macaddr;
828
829         PMD_INIT_FUNC_TRACE();
830
831         macaddr = &data->mac_addrs[index];
832
833         if (dpni == NULL) {
834                 RTE_LOG(ERR, PMD, "dpni is NULL");
835                 return;
836         }
837
838         ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
839                                    priv->token, macaddr->addr_bytes);
840         if (ret)
841                 RTE_LOG(ERR, PMD, "error: Removing the MAC ADDR failed:"
842                         " err = %d", ret);
843 }
844
845 static void
846 dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
847                        struct ether_addr *addr)
848 {
849         int ret;
850         struct dpaa2_dev_priv *priv = dev->data->dev_private;
851         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
852
853         PMD_INIT_FUNC_TRACE();
854
855         if (dpni == NULL) {
856                 RTE_LOG(ERR, PMD, "dpni is NULL");
857                 return;
858         }
859
860         ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
861                                         priv->token, addr->addr_bytes);
862
863         if (ret)
864                 RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
865 }
866 static
867 void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
868                          struct rte_eth_stats *stats)
869 {
870         struct dpaa2_dev_priv *priv = dev->data->dev_private;
871         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
872         int32_t  retcode;
873         uint8_t page0 = 0, page1 = 1, page2 = 2;
874         union dpni_statistics value;
875
876         memset(&value, 0, sizeof(union dpni_statistics));
877
878         PMD_INIT_FUNC_TRACE();
879
880         if (!dpni) {
881                 RTE_LOG(ERR, PMD, "dpni is NULL");
882                 return;
883         }
884
885         if (!stats) {
886                 RTE_LOG(ERR, PMD, "stats is NULL");
887                 return;
888         }
889
890         /*Get Counters from page_0*/
891         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
892                                       page0, &value);
893         if (retcode)
894                 goto err;
895
896         stats->ipackets = value.page_0.ingress_all_frames;
897         stats->ibytes = value.page_0.ingress_all_bytes;
898
899         /*Get Counters from page_1*/
900         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
901                                       page1, &value);
902         if (retcode)
903                 goto err;
904
905         stats->opackets = value.page_1.egress_all_frames;
906         stats->obytes = value.page_1.egress_all_bytes;
907
908         /*Get Counters from page_2*/
909         retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
910                                       page2, &value);
911         if (retcode)
912                 goto err;
913
914         /* Ingress drop frame count due to configured rules */
915         stats->ierrors = value.page_2.ingress_filtered_frames;
916         /* Ingress drop frame count due to error */
917         stats->ierrors += value.page_2.ingress_discarded_frames;
918
919         stats->oerrors = value.page_2.egress_discarded_frames;
920         stats->imissed = value.page_2.ingress_nobuffer_discards;
921
922         return;
923
924 err:
925         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
926         return;
927 };
928
929 static
930 void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
931 {
932         struct dpaa2_dev_priv *priv = dev->data->dev_private;
933         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
934         int32_t  retcode;
935
936         PMD_INIT_FUNC_TRACE();
937
938         if (dpni == NULL) {
939                 RTE_LOG(ERR, PMD, "dpni is NULL");
940                 return;
941         }
942
943         retcode =  dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
944         if (retcode)
945                 goto error;
946
947         return;
948
949 error:
950         RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
951         return;
952 };
953
/* return 0 means link status changed, -1 means not changed */
static int
dpaa2_dev_link_update(struct rte_eth_dev *dev,
			int wait_to_complete __rte_unused)
{
	int ret;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	struct rte_eth_link link, old;
	struct dpni_link_state state = {0};

	PMD_INIT_FUNC_TRACE();

	if (dpni == NULL) {
		RTE_LOG(ERR, PMD, "error : dpni is NULL");
		/* NOTE(review): returning 0 here signals "status changed"
		 * per the contract above even though nothing was read or
		 * written; -1 may be intended — confirm against callers.
		 */
		return 0;
	}
	/* Snapshot the currently cached link state for comparison */
	memset(&old, 0, sizeof(old));
	dpaa2_dev_atomic_read_link_status(dev, &old);

	/* Query the live link state from the MC firmware */
	ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
		return -1;
	}

	/* Nothing to publish if both up/down status and speed match */
	if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
		RTE_LOG(DEBUG, PMD, "No change in status\n");
		return -1;
	}

	/* Translate the DPNI state into the generic rte_eth_link form */
	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_status = state.up;
	link.link_speed = state.rate;

	if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	else
		link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Publish the new state atomically into dev->data */
	dpaa2_dev_atomic_write_link_status(dev, &link);

	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
	else
		PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
	return 0;
}
1002
1003 /**
1004  * Toggle the DPNI to enable, if not already enabled.
1005  * This is not strictly PHY up/down - it is more of logical toggling.
1006  */
1007 static int
1008 dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
1009 {
1010         int ret = -EINVAL;
1011         struct dpaa2_dev_priv *priv;
1012         struct fsl_mc_io *dpni;
1013         int en = 0;
1014
1015         PMD_INIT_FUNC_TRACE();
1016
1017         priv = dev->data->dev_private;
1018         dpni = (struct fsl_mc_io *)priv->hw;
1019
1020         if (dpni == NULL) {
1021                 RTE_LOG(ERR, PMD, "Device has not yet been configured");
1022                 return ret;
1023         }
1024
1025         /* Check if DPNI is currently enabled */
1026         ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
1027         if (ret) {
1028                 /* Unable to obtain dpni status; Not continuing */
1029                 PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1030                 return -EINVAL;
1031         }
1032
1033         /* Enable link if not already enabled */
1034         if (!en) {
1035                 ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
1036                 if (ret) {
1037                         PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
1038                         return -EINVAL;
1039                 }
1040         }
1041         /* changing tx burst function to start enqueues */
1042         dev->tx_pkt_burst = dpaa2_dev_tx;
1043         dev->data->dev_link.link_status = 1;
1044
1045         PMD_DRV_LOG(INFO, "Port %d Link UP successful", dev->data->port_id);
1046         return ret;
1047 }
1048
1049 /**
1050  * Toggle the DPNI to disable, if not already disabled.
1051  * This is not strictly PHY up/down - it is more of logical toggling.
1052  */
1053 static int
1054 dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
1055 {
1056         int ret = -EINVAL;
1057         struct dpaa2_dev_priv *priv;
1058         struct fsl_mc_io *dpni;
1059         int dpni_enabled = 0;
1060         int retries = 10;
1061
1062         PMD_INIT_FUNC_TRACE();
1063
1064         priv = dev->data->dev_private;
1065         dpni = (struct fsl_mc_io *)priv->hw;
1066
1067         if (dpni == NULL) {
1068                 RTE_LOG(ERR, PMD, "Device has not yet been configured");
1069                 return ret;
1070         }
1071
1072         /*changing  tx burst function to avoid any more enqueues */
1073         dev->tx_pkt_burst = dummy_dev_tx;
1074
1075         /* Loop while dpni_disable() attempts to drain the egress FQs
1076          * and confirm them back to us.
1077          */
1078         do {
1079                 ret = dpni_disable(dpni, 0, priv->token);
1080                 if (ret) {
1081                         PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
1082                         return ret;
1083                 }
1084                 ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
1085                 if (ret) {
1086                         PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
1087                         return ret;
1088                 }
1089                 if (dpni_enabled)
1090                         /* Allow the MC some slack */
1091                         rte_delay_us(100 * 1000);
1092         } while (dpni_enabled && --retries);
1093
1094         if (!retries) {
1095                 PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
1096                 /* todo- we may have to manually cleanup queues.
1097                  */
1098         } else {
1099                 PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
1100                             dev->data->port_id);
1101         }
1102
1103         dev->data->dev_link.link_status = 0;
1104
1105         return ret;
1106 }
1107
/* ethdev callback table wired into each probed DPNI port. */
static struct eth_dev_ops dpaa2_ethdev_ops = {
	/* device lifecycle */
	.dev_configure    = dpaa2_eth_dev_configure,
	.dev_start            = dpaa2_dev_start,
	.dev_stop             = dpaa2_dev_stop,
	.dev_close            = dpaa2_dev_close,
	/* rx filtering modes */
	.promiscuous_enable   = dpaa2_dev_promiscuous_enable,
	.promiscuous_disable  = dpaa2_dev_promiscuous_disable,
	.allmulticast_enable  = dpaa2_dev_allmulticast_enable,
	.allmulticast_disable = dpaa2_dev_allmulticast_disable,
	/* logical link control and status */
	.dev_set_link_up      = dpaa2_dev_set_link_up,
	.dev_set_link_down    = dpaa2_dev_set_link_down,
	.link_update       = dpaa2_dev_link_update,
	/* statistics and device info */
	.stats_get             = dpaa2_dev_stats_get,
	.stats_reset       = dpaa2_dev_stats_reset,
	.dev_infos_get     = dpaa2_dev_info_get,
	.dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
	.mtu_set           = dpaa2_dev_mtu_set,
	/* VLAN filtering/offload */
	.vlan_filter_set      = dpaa2_vlan_filter_set,
	.vlan_offload_set     = dpaa2_vlan_offload_set,
	/* queue setup/teardown */
	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
	.rx_queue_release  = dpaa2_dev_rx_queue_release,
	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
	.tx_queue_release  = dpaa2_dev_tx_queue_release,
	/* MAC address management */
	.mac_addr_add         = dpaa2_dev_add_mac_addr,
	.mac_addr_remove      = dpaa2_dev_remove_mac_addr,
	.mac_addr_set         = dpaa2_dev_set_mac_addr,
};
1135
/**
 * Per-port initialization, invoked from rte_dpaa2_probe().
 *
 * Opens the DPNI object on the MC bus, resets it, queries its
 * attributes, allocates RX/TX queue bookkeeping and the MAC address
 * table, reads the primary MAC, configures TX/TX-confirm buffer
 * layouts, and finally installs the ops table and burst handlers.
 * On failure after dpni_open() succeeds, dpaa2_dev_uninit() is used
 * to roll back. Returns 0 on success, negative/-1 on failure.
 */
static int
dpaa2_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_device *dev = eth_dev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpni_dev;
	struct dpni_attr attr;
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct dpni_buffer_layout layout;
	int i, ret, hw_id;

	PMD_INIT_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);

	hw_id = dpaa2_dev->object_id;

	/* MC I/O portal handle used for all DPNI commands on this port */
	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
	if (!dpni_dev) {
		PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
		return -1;
	}

	dpni_dev->regs = rte_mcp_ptr_list[0];
	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in opening dpni@%d with err code %d\n",
			     hw_id, ret);
		rte_free(dpni_dev);
		return -1;
	}

	/* Clean the device first */
	ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure cleaning dpni@%d with err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure in get dpni@%d attribute, err code %d\n",
			     hw_id, ret);
		goto init_err;
	}

	priv->num_tc = attr.num_tcs;
	/* NOTE(review): the break makes this loop set only index 0;
	 * only TC0's distribution size is recorded — confirm intent.
	 */
	for (i = 0; i < attr.num_tcs; i++) {
		priv->num_dist_per_tc[i] = attr.num_queues;
		break;
	}

	/* Distribution is per Tc only,
	 * so choosing RX queues from default TC only
	 */
	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];

	/* With a single TC all queues are TX queues; otherwise one TX
	 * queue per TC.
	 */
	if (attr.num_tcs == 1)
		priv->nb_tx_queues = attr.num_queues;
	else
		priv->nb_tx_queues = attr.num_tcs;

	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);

	priv->hw = dpni_dev;
	priv->hw_id = hw_id;
	priv->options = attr.options;
	priv->max_mac_filters = attr.mac_filter_entries;
	priv->max_vlan_filters = attr.vlan_filter_entries;
	priv->flags = 0;

	priv->flags |= DPAA2_TX_CGR_SUPPORT;
	PMD_INIT_LOG(INFO, "Enable the tx congestion control support");

	/* Allocate memory for hardware structure for queues */
	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
		goto init_err;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
		   "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * attr.mac_filter_entries);
		ret = -ENOMEM;
		goto init_err;
	}

	/* Populate slot 0 with the hardware's primary MAC address */
	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
					priv->token,
			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
	if (ret) {
		PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
			     ret);
		goto init_err;
	}

	/* ... tx buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
			     ret);
		goto init_err;
	}

	/* ... tx-conf and error buffer layout ... */
	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	layout.pass_frame_status = 1;
	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
				     DPNI_QUEUE_TX_CONFIRM, &layout);
	if (ret) {
		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
			     ret);
		goto init_err;
	}

	/* Port is ready: install callbacks and burst handlers */
	eth_dev->dev_ops = &dpaa2_ethdev_ops;
	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;

	eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
	rte_fslmc_vfio_dmamap();

	return 0;
init_err:
	dpaa2_dev_uninit(eth_dev);
	return ret;
}
1282
/**
 * Per-port teardown, the inverse of dpaa2_dev_init(); also used as the
 * rollback path when init fails partway.
 *
 * Closes the port at the ethdev level, frees RX queue storage and the
 * MAC address table, closes the DPNI object on the MC bus and frees
 * the MC I/O handle. Returns 0 on success, -EPERM in a secondary
 * process, -1 if already torn down.
 */
static int
dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
	int i, ret;
	struct dpaa2_queue *dpaa2_q;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns these resources */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (!dpni) {
		PMD_INIT_LOG(WARNING, "Already closed or not started");
		return -1;
	}

	dpaa2_dev_close(eth_dev);

	/* rx_vq entries come from one contiguous allocation anchored
	 * at rx_vq[0]; free per-queue storage first, then the block.
	 */
	if (priv->rx_vq[0]) {
		/* cleaning up queue storage */
		for (i = 0; i < priv->nb_rx_queues; i++) {
			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
			if (dpaa2_q->q_storage)
				rte_free(dpaa2_q->q_storage);
		}
		/*free the all queue memory */
		rte_free(priv->rx_vq[0]);
		priv->rx_vq[0] = NULL;
	}

	/* free memory for storing MAC addresses */
	if (eth_dev->data->mac_addrs) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
	}

	/* Close the device at underlying layer*/
	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "Failure closing dpni device with err code %d\n",
			     ret);
	}

	/* Free the allocated memory for ethernet private data and dpni*/
	priv->hw = NULL;
	rte_free(dpni);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
1339
1340 static int
1341 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
1342                 struct rte_dpaa2_device *dpaa2_dev)
1343 {
1344         struct rte_eth_dev *eth_dev;
1345         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
1346
1347         int diag;
1348
1349         sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
1350
1351         eth_dev = rte_eth_dev_allocate(ethdev_name);
1352         if (eth_dev == NULL)
1353                 return -ENOMEM;
1354
1355         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1356                 eth_dev->data->dev_private = rte_zmalloc(
1357                                                 "ethdev private structure",
1358                                                 sizeof(struct dpaa2_dev_priv),
1359                                                 RTE_CACHE_LINE_SIZE);
1360                 if (eth_dev->data->dev_private == NULL) {
1361                         PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
1362                                      " private port data\n");
1363                         rte_eth_dev_release_port(eth_dev);
1364                         return -ENOMEM;
1365                 }
1366         }
1367         eth_dev->device = &dpaa2_dev->device;
1368         dpaa2_dev->eth_dev = eth_dev;
1369         eth_dev->data->rx_mbuf_alloc_failed = 0;
1370
1371         /* Invoke PMD device initialization function */
1372         diag = dpaa2_dev_init(eth_dev);
1373         if (diag == 0)
1374                 return 0;
1375
1376         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1377                 rte_free(eth_dev->data->dev_private);
1378         rte_eth_dev_release_port(eth_dev);
1379         return diag;
1380 }
1381
1382 static int
1383 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
1384 {
1385         struct rte_eth_dev *eth_dev;
1386
1387         eth_dev = dpaa2_dev->eth_dev;
1388         dpaa2_dev_uninit(eth_dev);
1389
1390         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1391                 rte_free(eth_dev->data->dev_private);
1392         rte_eth_dev_release_port(eth_dev);
1393
1394         return 0;
1395 }
1396
/* DPAA2 bus driver object binding DPNI devices to this PMD. */
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

/* Register the driver with the DPAA2 (fsl-mc) bus under "net_dpaa2" */
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);