net/dpaa2: add basic operations
[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright (c) 2016 NXP. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_ethdev.h>
46 #include <rte_fslmc.h>
47
48 #include <fslmc_logs.h>
49 #include <fslmc_vfio.h>
50 #include <dpaa2_hw_pvt.h>
51
52 #include "dpaa2_ethdev.h"
53
54 static struct rte_dpaa2_driver rte_dpaa2_pmd;
55
56 static void
57 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
58 {
59         struct dpaa2_dev_priv *priv = dev->data->dev_private;
60
61         PMD_INIT_FUNC_TRACE();
62
63         dev_info->if_index = priv->hw_id;
64
65         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
66         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
67
68         dev_info->speed_capa = ETH_LINK_SPEED_1G |
69                         ETH_LINK_SPEED_2_5G |
70                         ETH_LINK_SPEED_10G;
71 }
72
73 static int
74 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
75 {
76         struct dpaa2_dev_priv *priv = dev->data->dev_private;
77         uint16_t dist_idx;
78         uint32_t vq_id;
79         struct dpaa2_queue *mc_q, *mcq;
80         uint32_t tot_queues;
81         int i;
82         struct dpaa2_queue *dpaa2_q;
83
84         PMD_INIT_FUNC_TRACE();
85
86         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
87         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
88                           RTE_CACHE_LINE_SIZE);
89         if (!mc_q) {
90                 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
91                 return -1;
92         }
93
94         for (i = 0; i < priv->nb_rx_queues; i++) {
95                 mc_q->dev = dev;
96                 priv->rx_vq[i] = mc_q++;
97                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
98                 dpaa2_q->q_storage = rte_malloc("dq_storage",
99                                         sizeof(struct queue_storage_info_t),
100                                         RTE_CACHE_LINE_SIZE);
101                 if (!dpaa2_q->q_storage)
102                         goto fail;
103
104                 memset(dpaa2_q->q_storage, 0,
105                        sizeof(struct queue_storage_info_t));
106                 dpaa2_q->q_storage->dq_storage[0] = rte_malloc(NULL,
107                         DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
108                         RTE_CACHE_LINE_SIZE);
109         }
110
111         for (i = 0; i < priv->nb_tx_queues; i++) {
112                 mc_q->dev = dev;
113                 mc_q->flow_id = DPNI_NEW_FLOW_ID;
114                 priv->tx_vq[i] = mc_q++;
115         }
116
117         vq_id = 0;
118         for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
119                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
120                 mcq->tc_index = DPAA2_DEF_TC;
121                 mcq->flow_id = dist_idx;
122                 vq_id++;
123         }
124
125         return 0;
126 fail:
127         i -= 1;
128         mc_q = priv->rx_vq[0];
129         while (i >= 0) {
130                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
131                 rte_free(dpaa2_q->q_storage->dq_storage[0]);
132                 rte_free(dpaa2_q->q_storage);
133                 priv->rx_vq[i--] = NULL;
134         }
135         rte_free(mc_q);
136         return -1;
137 }
138
139 static int
140 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
141 {
142         struct rte_eth_dev_data *data = dev->data;
143         struct rte_eth_conf *eth_conf = &data->dev_conf;
144
145         PMD_INIT_FUNC_TRACE();
146
147         /* Check for correct configuration */
148         if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
149             data->nb_rx_queues > 1) {
150                 PMD_INIT_LOG(ERR, "Distribution is not enabled, "
151                             "but Rx queues more than 1\n");
152                 return -1;
153         }
154
155         return 0;
156 }
157
158 /* Function to setup RX flow information. It contains traffic class ID,
159  * flow ID, destination configuration etc.
160  */
161 static int
162 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
163                          uint16_t rx_queue_id,
164                          uint16_t nb_rx_desc __rte_unused,
165                          unsigned int socket_id __rte_unused,
166                          const struct rte_eth_rxconf *rx_conf __rte_unused,
167                          struct rte_mempool *mb_pool)
168 {
169         struct dpaa2_dev_priv *priv = dev->data->dev_private;
170         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
171         struct dpaa2_queue *dpaa2_q;
172         struct dpni_queue cfg;
173         uint8_t options = 0;
174         uint8_t flow_id;
175         int ret;
176
177         PMD_INIT_FUNC_TRACE();
178
179         PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
180                      dev, rx_queue_id, mb_pool, rx_conf);
181
182         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
183         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
184
185         /*Get the tc id and flow id from given VQ id*/
186         flow_id = rx_queue_id;
187         memset(&cfg, 0, sizeof(struct dpni_queue));
188
189         options = options | DPNI_QUEUE_OPT_USER_CTX;
190         cfg.user_context = (uint64_t)(dpaa2_q);
191
192         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
193                              dpaa2_q->tc_index, flow_id, options, &cfg);
194         if (ret) {
195                 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
196                 return -1;
197         }
198
199         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
200         return 0;
201 }
202
203 static int
204 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
205                          uint16_t tx_queue_id,
206                          uint16_t nb_tx_desc __rte_unused,
207                          unsigned int socket_id __rte_unused,
208                          const struct rte_eth_txconf *tx_conf __rte_unused)
209 {
210         struct dpaa2_dev_priv *priv = dev->data->dev_private;
211         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
212                 priv->tx_vq[tx_queue_id];
213         struct fsl_mc_io *dpni = priv->hw;
214         struct dpni_queue tx_conf_cfg;
215         struct dpni_queue tx_flow_cfg;
216         uint8_t options = 0, flow_id;
217         uint32_t tc_id;
218         int ret;
219
220         PMD_INIT_FUNC_TRACE();
221
222         /* Return if queue already configured */
223         if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
224                 return 0;
225
226         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
227         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
228
229         tc_id = 0;
230         flow_id = tx_queue_id;
231
232         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
233                              tc_id, flow_id, options, &tx_flow_cfg);
234         if (ret) {
235                 PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
236                              "tc_id=%d, flow =%d ErrorCode = %x\n",
237                              tc_id, flow_id, -ret);
238                         return -1;
239         }
240
241         dpaa2_q->flow_id = flow_id;
242
243         if (tx_queue_id == 0) {
244                 /*Set tx-conf and error configuration*/
245                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
246                                                     priv->token,
247                                                     DPNI_CONF_DISABLE);
248                 if (ret) {
249                         PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
250                                      " ErrorCode = %x", ret);
251                         return -1;
252                 }
253         }
254         dpaa2_q->tc_index = tc_id;
255
256         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
257         return 0;
258 }
259
/* Rx queue release hook: intentionally a no-op.  Queue memory is owned by
 * the device (allocated in dpaa2_alloc_rx_tx_queues) and released only at
 * device uninit time, not per-queue.
 */
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}
265
/* Tx queue release hook: intentionally a no-op, mirroring the Rx release
 * hook — queue memory is owned by the device, not by individual queues.
 */
static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}
271
272 static int
273 dpaa2_dev_start(struct rte_eth_dev *dev)
274 {
275         struct rte_eth_dev_data *data = dev->data;
276         struct dpaa2_dev_priv *priv = data->dev_private;
277         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
278         struct dpni_queue cfg;
279         uint16_t qdid;
280         struct dpni_queue_id qid;
281         struct dpaa2_queue *dpaa2_q;
282         int ret, i;
283
284         PMD_INIT_FUNC_TRACE();
285
286         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
287         if (ret) {
288                 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
289                              ret, priv->hw_id);
290                 return ret;
291         }
292
293         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
294                             DPNI_QUEUE_TX, &qdid);
295         if (ret) {
296                 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
297                 return ret;
298         }
299         priv->qdid = qdid;
300
301         for (i = 0; i < data->nb_rx_queues; i++) {
302                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
303                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
304                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
305                                        dpaa2_q->flow_id, &cfg, &qid);
306                 if (ret) {
307                         PMD_INIT_LOG(ERR, "Error to get flow "
308                                      "information Error code = %d\n", ret);
309                         return ret;
310                 }
311                 dpaa2_q->fqid = qid.fqid;
312         }
313
314         return 0;
315 }
316
317 /**
318  *  This routine disables all traffic on the adapter by issuing a
319  *  global reset on the MAC.
320  */
321 static void
322 dpaa2_dev_stop(struct rte_eth_dev *dev)
323 {
324         struct dpaa2_dev_priv *priv = dev->data->dev_private;
325         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
326         int ret;
327
328         PMD_INIT_FUNC_TRACE();
329
330         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
331         if (ret) {
332                 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
333                              ret, priv->hw_id);
334                 return;
335         }
336 }
337
338 static void
339 dpaa2_dev_close(struct rte_eth_dev *dev)
340 {
341         struct dpaa2_dev_priv *priv = dev->data->dev_private;
342         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
343         int ret;
344
345         PMD_INIT_FUNC_TRACE();
346
347         /* Clean the device first */
348         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
349         if (ret) {
350                 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
351                              " error code %d\n", ret);
352                 return;
353         }
354 }
355
356 static struct eth_dev_ops dpaa2_ethdev_ops = {
357         .dev_configure    = dpaa2_eth_dev_configure,
358         .dev_start            = dpaa2_dev_start,
359         .dev_stop             = dpaa2_dev_stop,
360         .dev_close            = dpaa2_dev_close,
361         .dev_infos_get     = dpaa2_dev_info_get,
362         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
363         .rx_queue_release  = dpaa2_dev_rx_queue_release,
364         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
365         .tx_queue_release  = dpaa2_dev_tx_queue_release,
366 };
367
368 static int
369 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
370 {
371         struct rte_device *dev = eth_dev->device;
372         struct rte_dpaa2_device *dpaa2_dev;
373         struct fsl_mc_io *dpni_dev;
374         struct dpni_attr attr;
375         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
376         int ret, hw_id;
377
378         PMD_INIT_FUNC_TRACE();
379
380         /* For secondary processes, the primary has done all the work */
381         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
382                 return 0;
383
384         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
385
386         hw_id = dpaa2_dev->object_id;
387
388         dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
389         if (!dpni_dev) {
390                 PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
391                 return -1;
392         }
393
394         dpni_dev->regs = rte_mcp_ptr_list[0];
395         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
396         if (ret) {
397                 PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
398                         " error code %d\n", hw_id, ret);
399                 return -1;
400         }
401
402         /* Clean the device first */
403         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
404         if (ret) {
405                 PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
406                         " error code %d\n", hw_id, ret);
407                 return -1;
408         }
409
410         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
411         if (ret) {
412                 PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
413                         " error code %d\n", hw_id, ret);
414                 return -1;
415         }
416
417         priv->num_tc = attr.num_tcs;
418         priv->nb_rx_queues = attr.num_queues;
419         priv->nb_tx_queues = attr.num_queues;
420
421         priv->hw = dpni_dev;
422         priv->hw_id = hw_id;
423         priv->flags = 0;
424
425         /* Allocate memory for hardware structure for queues */
426         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
427         if (ret) {
428                 PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
429                 return -ret;
430         }
431
432         eth_dev->dev_ops = &dpaa2_ethdev_ops;
433         eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
434
435         return 0;
436 }
437
438 static int
439 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
440 {
441         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
442         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
443         int i, ret;
444         struct dpaa2_queue *dpaa2_q;
445
446         PMD_INIT_FUNC_TRACE();
447
448         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
449                 return -EPERM;
450
451         if (!dpni) {
452                 PMD_INIT_LOG(WARNING, "Already closed or not started");
453                 return -1;
454         }
455
456         dpaa2_dev_close(eth_dev);
457
458         if (priv->rx_vq[0]) {
459                 /* cleaning up queue storage */
460                 for (i = 0; i < priv->nb_rx_queues; i++) {
461                         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
462                         if (dpaa2_q->q_storage)
463                                 rte_free(dpaa2_q->q_storage);
464                 }
465                 /*free the all queue memory */
466                 rte_free(priv->rx_vq[0]);
467                 priv->rx_vq[0] = NULL;
468         }
469
470
471         /*Close the device at underlying layer*/
472         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
473         if (ret) {
474                 PMD_INIT_LOG(ERR, "Failure closing dpni device with"
475                         " error code %d\n", ret);
476         }
477
478         /*Free the allocated memory for ethernet private data and dpni*/
479         priv->hw = NULL;
480         free(dpni);
481
482         eth_dev->dev_ops = NULL;
483
484         return 0;
485 }
486
487 static int
488 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
489                 struct rte_dpaa2_device *dpaa2_dev)
490 {
491         struct rte_eth_dev *eth_dev;
492         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
493
494         int diag;
495
496         sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
497
498         eth_dev = rte_eth_dev_allocate(ethdev_name);
499         if (eth_dev == NULL)
500                 return -ENOMEM;
501
502         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
503                 eth_dev->data->dev_private = rte_zmalloc(
504                                                 "ethdev private structure",
505                                                 sizeof(struct dpaa2_dev_priv),
506                                                 RTE_CACHE_LINE_SIZE);
507                 if (eth_dev->data->dev_private == NULL) {
508                         PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
509                                      " private port data\n");
510                         rte_eth_dev_release_port(eth_dev);
511                         return -ENOMEM;
512                 }
513         }
514         eth_dev->device = &dpaa2_dev->device;
515         dpaa2_dev->eth_dev = eth_dev;
516         eth_dev->data->rx_mbuf_alloc_failed = 0;
517
518         /* Invoke PMD device initialization function */
519         diag = dpaa2_dev_init(eth_dev);
520         if (diag == 0)
521                 return 0;
522
523         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
524                 rte_free(eth_dev->data->dev_private);
525         rte_eth_dev_release_port(eth_dev);
526         return diag;
527 }
528
529 static int
530 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
531 {
532         struct rte_eth_dev *eth_dev;
533
534         eth_dev = dpaa2_dev->eth_dev;
535         dpaa2_dev_uninit(eth_dev);
536
537         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
538                 rte_free(eth_dev->data->dev_private);
539         rte_eth_dev_release_port(eth_dev);
540
541         return 0;
542 }
543
/* Driver descriptor registered with the fslmc bus (see the registration
 * macro below): matches DPNI objects and wires up probe/remove.
 */
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .drv_type = DPAA2_MC_DPNI_DEVID,
        .probe = rte_dpaa2_probe,
        .remove = rte_dpaa2_remove,
};
549
550 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);