net/dpaa2: add RSS flow distribution
[dpdk.git] / drivers / net / dpaa2 / dpaa2_ethdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
5  *   Copyright (c) 2016 NXP. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <time.h>
35 #include <net/if.h>
36
37 #include <rte_mbuf.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_memcpy.h>
41 #include <rte_string_fns.h>
42 #include <rte_cycles.h>
43 #include <rte_kvargs.h>
44 #include <rte_dev.h>
45 #include <rte_ethdev.h>
46 #include <rte_fslmc.h>
47
48 #include <fslmc_logs.h>
49 #include <fslmc_vfio.h>
50 #include <dpaa2_hw_pvt.h>
51
52 #include "dpaa2_ethdev.h"
53
54 static struct rte_dpaa2_driver rte_dpaa2_pmd;
55
56 static void
57 dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
58 {
59         struct dpaa2_dev_priv *priv = dev->data->dev_private;
60
61         PMD_INIT_FUNC_TRACE();
62
63         dev_info->if_index = priv->hw_id;
64
65         dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
66         dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
67
68         dev_info->speed_capa = ETH_LINK_SPEED_1G |
69                         ETH_LINK_SPEED_2_5G |
70                         ETH_LINK_SPEED_10G;
71 }
72
73 static int
74 dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
75 {
76         struct dpaa2_dev_priv *priv = dev->data->dev_private;
77         uint16_t dist_idx;
78         uint32_t vq_id;
79         struct dpaa2_queue *mc_q, *mcq;
80         uint32_t tot_queues;
81         int i;
82         struct dpaa2_queue *dpaa2_q;
83
84         PMD_INIT_FUNC_TRACE();
85
86         tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
87         mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
88                           RTE_CACHE_LINE_SIZE);
89         if (!mc_q) {
90                 PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
91                 return -1;
92         }
93
94         for (i = 0; i < priv->nb_rx_queues; i++) {
95                 mc_q->dev = dev;
96                 priv->rx_vq[i] = mc_q++;
97                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
98                 dpaa2_q->q_storage = rte_malloc("dq_storage",
99                                         sizeof(struct queue_storage_info_t),
100                                         RTE_CACHE_LINE_SIZE);
101                 if (!dpaa2_q->q_storage)
102                         goto fail;
103
104                 memset(dpaa2_q->q_storage, 0,
105                        sizeof(struct queue_storage_info_t));
106                 dpaa2_q->q_storage->dq_storage[0] = rte_malloc(NULL,
107                         DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
108                         RTE_CACHE_LINE_SIZE);
109         }
110
111         for (i = 0; i < priv->nb_tx_queues; i++) {
112                 mc_q->dev = dev;
113                 mc_q->flow_id = DPNI_NEW_FLOW_ID;
114                 priv->tx_vq[i] = mc_q++;
115         }
116
117         vq_id = 0;
118         for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
119              dist_idx++) {
120                 mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
121                 mcq->tc_index = DPAA2_DEF_TC;
122                 mcq->flow_id = dist_idx;
123                 vq_id++;
124         }
125
126         return 0;
127 fail:
128         i -= 1;
129         mc_q = priv->rx_vq[0];
130         while (i >= 0) {
131                 dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
132                 rte_free(dpaa2_q->q_storage->dq_storage[0]);
133                 rte_free(dpaa2_q->q_storage);
134                 priv->rx_vq[i--] = NULL;
135         }
136         rte_free(mc_q);
137         return -1;
138 }
139
140 static int
141 dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
142 {
143         struct rte_eth_dev_data *data = dev->data;
144         struct rte_eth_conf *eth_conf = &data->dev_conf;
145         int ret;
146
147         PMD_INIT_FUNC_TRACE();
148
149         /* Check for correct configuration */
150         if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
151             data->nb_rx_queues > 1) {
152                 PMD_INIT_LOG(ERR, "Distribution is not enabled, "
153                             "but Rx queues more than 1\n");
154                 return -1;
155         }
156
157         if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
158                 /* Return in case number of Rx queues is 1 */
159                 if (data->nb_rx_queues == 1)
160                         return 0;
161                 ret = dpaa2_setup_flow_dist(dev,
162                                 eth_conf->rx_adv_conf.rss_conf.rss_hf);
163                 if (ret) {
164                         PMD_INIT_LOG(ERR, "unable to set flow distribution."
165                                      "please check queue config\n");
166                         return ret;
167                 }
168         }
169         return 0;
170 }
171
172 /* Function to setup RX flow information. It contains traffic class ID,
173  * flow ID, destination configuration etc.
174  */
175 static int
176 dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
177                          uint16_t rx_queue_id,
178                          uint16_t nb_rx_desc __rte_unused,
179                          unsigned int socket_id __rte_unused,
180                          const struct rte_eth_rxconf *rx_conf __rte_unused,
181                          struct rte_mempool *mb_pool)
182 {
183         struct dpaa2_dev_priv *priv = dev->data->dev_private;
184         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
185         struct dpaa2_queue *dpaa2_q;
186         struct dpni_queue cfg;
187         uint8_t options = 0;
188         uint8_t flow_id;
189         int ret;
190
191         PMD_INIT_FUNC_TRACE();
192
193         PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
194                      dev, rx_queue_id, mb_pool, rx_conf);
195
196         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
197         dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
198
199         /*Get the tc id and flow id from given VQ id*/
200         flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
201         memset(&cfg, 0, sizeof(struct dpni_queue));
202
203         options = options | DPNI_QUEUE_OPT_USER_CTX;
204         cfg.user_context = (uint64_t)(dpaa2_q);
205
206         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
207                              dpaa2_q->tc_index, flow_id, options, &cfg);
208         if (ret) {
209                 PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
210                 return -1;
211         }
212
213         dev->data->rx_queues[rx_queue_id] = dpaa2_q;
214         return 0;
215 }
216
217 static int
218 dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
219                          uint16_t tx_queue_id,
220                          uint16_t nb_tx_desc __rte_unused,
221                          unsigned int socket_id __rte_unused,
222                          const struct rte_eth_txconf *tx_conf __rte_unused)
223 {
224         struct dpaa2_dev_priv *priv = dev->data->dev_private;
225         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
226                 priv->tx_vq[tx_queue_id];
227         struct fsl_mc_io *dpni = priv->hw;
228         struct dpni_queue tx_conf_cfg;
229         struct dpni_queue tx_flow_cfg;
230         uint8_t options = 0, flow_id;
231         uint32_t tc_id;
232         int ret;
233
234         PMD_INIT_FUNC_TRACE();
235
236         /* Return if queue already configured */
237         if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
238                 return 0;
239
240         memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
241         memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
242
243         tc_id = 0;
244         flow_id = tx_queue_id;
245
246         ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
247                              tc_id, flow_id, options, &tx_flow_cfg);
248         if (ret) {
249                 PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
250                              "tc_id=%d, flow =%d ErrorCode = %x\n",
251                              tc_id, flow_id, -ret);
252                         return -1;
253         }
254
255         dpaa2_q->flow_id = flow_id;
256
257         if (tx_queue_id == 0) {
258                 /*Set tx-conf and error configuration*/
259                 ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
260                                                     priv->token,
261                                                     DPNI_CONF_DISABLE);
262                 if (ret) {
263                         PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
264                                      " ErrorCode = %x", ret);
265                         return -1;
266                 }
267         }
268         dpaa2_q->tc_index = tc_id;
269
270         dev->data->tx_queues[tx_queue_id] = dpaa2_q;
271         return 0;
272 }
273
/* Rx queue release hook: intentionally a no-op. Queue memory is owned by
 * the device (allocated in dpaa2_alloc_rx_tx_queues, freed in
 * dpaa2_dev_uninit), not by individual queues.
 */
static void
dpaa2_dev_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
279
/* Tx queue release hook: intentionally a no-op. Queue memory is owned by
 * the device (allocated in dpaa2_alloc_rx_tx_queues, freed in
 * dpaa2_dev_uninit), not by individual queues.
 */
static void
dpaa2_dev_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}
285
286 static int
287 dpaa2_dev_start(struct rte_eth_dev *dev)
288 {
289         struct rte_eth_dev_data *data = dev->data;
290         struct dpaa2_dev_priv *priv = data->dev_private;
291         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
292         struct dpni_queue cfg;
293         uint16_t qdid;
294         struct dpni_queue_id qid;
295         struct dpaa2_queue *dpaa2_q;
296         int ret, i;
297
298         PMD_INIT_FUNC_TRACE();
299
300         ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
301         if (ret) {
302                 PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
303                              ret, priv->hw_id);
304                 return ret;
305         }
306
307         ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
308                             DPNI_QUEUE_TX, &qdid);
309         if (ret) {
310                 PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
311                 return ret;
312         }
313         priv->qdid = qdid;
314
315         for (i = 0; i < data->nb_rx_queues; i++) {
316                 dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
317                 ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
318                                      DPNI_QUEUE_RX, dpaa2_q->tc_index,
319                                        dpaa2_q->flow_id, &cfg, &qid);
320                 if (ret) {
321                         PMD_INIT_LOG(ERR, "Error to get flow "
322                                      "information Error code = %d\n", ret);
323                         return ret;
324                 }
325                 dpaa2_q->fqid = qid.fqid;
326         }
327
328         return 0;
329 }
330
331 /**
332  *  This routine disables all traffic on the adapter by issuing a
333  *  global reset on the MAC.
334  */
335 static void
336 dpaa2_dev_stop(struct rte_eth_dev *dev)
337 {
338         struct dpaa2_dev_priv *priv = dev->data->dev_private;
339         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
340         int ret;
341
342         PMD_INIT_FUNC_TRACE();
343
344         ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
345         if (ret) {
346                 PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
347                              ret, priv->hw_id);
348                 return;
349         }
350 }
351
352 static void
353 dpaa2_dev_close(struct rte_eth_dev *dev)
354 {
355         struct dpaa2_dev_priv *priv = dev->data->dev_private;
356         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
357         int ret;
358
359         PMD_INIT_FUNC_TRACE();
360
361         /* Clean the device first */
362         ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
363         if (ret) {
364                 PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
365                              " error code %d\n", ret);
366                 return;
367         }
368 }
369
370 static struct eth_dev_ops dpaa2_ethdev_ops = {
371         .dev_configure    = dpaa2_eth_dev_configure,
372         .dev_start            = dpaa2_dev_start,
373         .dev_stop             = dpaa2_dev_stop,
374         .dev_close            = dpaa2_dev_close,
375         .dev_infos_get     = dpaa2_dev_info_get,
376         .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
377         .rx_queue_release  = dpaa2_dev_rx_queue_release,
378         .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
379         .tx_queue_release  = dpaa2_dev_tx_queue_release,
380 };
381
382 static int
383 dpaa2_dev_init(struct rte_eth_dev *eth_dev)
384 {
385         struct rte_device *dev = eth_dev->device;
386         struct rte_dpaa2_device *dpaa2_dev;
387         struct fsl_mc_io *dpni_dev;
388         struct dpni_attr attr;
389         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
390         int i, ret, hw_id;
391
392         PMD_INIT_FUNC_TRACE();
393
394         /* For secondary processes, the primary has done all the work */
395         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
396                 return 0;
397
398         dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
399
400         hw_id = dpaa2_dev->object_id;
401
402         dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
403         if (!dpni_dev) {
404                 PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
405                 return -1;
406         }
407
408         dpni_dev->regs = rte_mcp_ptr_list[0];
409         ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
410         if (ret) {
411                 PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
412                         " error code %d\n", hw_id, ret);
413                 return -1;
414         }
415
416         /* Clean the device first */
417         ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
418         if (ret) {
419                 PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
420                         " error code %d\n", hw_id, ret);
421                 return -1;
422         }
423
424         ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
425         if (ret) {
426                 PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
427                         " error code %d\n", hw_id, ret);
428                 return -1;
429         }
430
431         priv->num_tc = attr.num_tcs;
432         for (i = 0; i < attr.num_tcs; i++) {
433                 priv->num_dist_per_tc[i] = attr.num_queues;
434                 break;
435         }
436
437         /* Distribution is per Tc only,
438          * so choosing RX queues from default TC only
439          */
440         priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
441
442         priv->nb_tx_queues = attr.num_queues;
443
444         priv->hw = dpni_dev;
445         priv->hw_id = hw_id;
446         priv->flags = 0;
447
448         /* Allocate memory for hardware structure for queues */
449         ret = dpaa2_alloc_rx_tx_queues(eth_dev);
450         if (ret) {
451                 PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
452                 return -ret;
453         }
454
455         eth_dev->dev_ops = &dpaa2_ethdev_ops;
456         eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
457
458         return 0;
459 }
460
461 static int
462 dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
463 {
464         struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
465         struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
466         int i, ret;
467         struct dpaa2_queue *dpaa2_q;
468
469         PMD_INIT_FUNC_TRACE();
470
471         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
472                 return -EPERM;
473
474         if (!dpni) {
475                 PMD_INIT_LOG(WARNING, "Already closed or not started");
476                 return -1;
477         }
478
479         dpaa2_dev_close(eth_dev);
480
481         if (priv->rx_vq[0]) {
482                 /* cleaning up queue storage */
483                 for (i = 0; i < priv->nb_rx_queues; i++) {
484                         dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
485                         if (dpaa2_q->q_storage)
486                                 rte_free(dpaa2_q->q_storage);
487                 }
488                 /*free the all queue memory */
489                 rte_free(priv->rx_vq[0]);
490                 priv->rx_vq[0] = NULL;
491         }
492
493
494         /*Close the device at underlying layer*/
495         ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
496         if (ret) {
497                 PMD_INIT_LOG(ERR, "Failure closing dpni device with"
498                         " error code %d\n", ret);
499         }
500
501         /*Free the allocated memory for ethernet private data and dpni*/
502         priv->hw = NULL;
503         free(dpni);
504
505         eth_dev->dev_ops = NULL;
506
507         return 0;
508 }
509
510 static int
511 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
512                 struct rte_dpaa2_device *dpaa2_dev)
513 {
514         struct rte_eth_dev *eth_dev;
515         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
516
517         int diag;
518
519         sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
520
521         eth_dev = rte_eth_dev_allocate(ethdev_name);
522         if (eth_dev == NULL)
523                 return -ENOMEM;
524
525         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
526                 eth_dev->data->dev_private = rte_zmalloc(
527                                                 "ethdev private structure",
528                                                 sizeof(struct dpaa2_dev_priv),
529                                                 RTE_CACHE_LINE_SIZE);
530                 if (eth_dev->data->dev_private == NULL) {
531                         PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
532                                      " private port data\n");
533                         rte_eth_dev_release_port(eth_dev);
534                         return -ENOMEM;
535                 }
536         }
537         eth_dev->device = &dpaa2_dev->device;
538         dpaa2_dev->eth_dev = eth_dev;
539         eth_dev->data->rx_mbuf_alloc_failed = 0;
540
541         /* Invoke PMD device initialization function */
542         diag = dpaa2_dev_init(eth_dev);
543         if (diag == 0)
544                 return 0;
545
546         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
547                 rte_free(eth_dev->data->dev_private);
548         rte_eth_dev_release_port(eth_dev);
549         return diag;
550 }
551
552 static int
553 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
554 {
555         struct rte_eth_dev *eth_dev;
556
557         eth_dev = dpaa2_dev->eth_dev;
558         dpaa2_dev_uninit(eth_dev);
559
560         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
561                 rte_free(eth_dev->data->dev_private);
562         rte_eth_dev_release_port(eth_dev);
563
564         return 0;
565 }
566
/* DPAA2 bus driver descriptor: binds DPNI objects enumerated on the
 * fsl-mc bus to this PMD's probe/remove callbacks.
 */
static struct rte_dpaa2_driver rte_dpaa2_pmd = {
	.drv_type = DPAA2_MC_DPNI_DEVID,
	.probe = rte_dpaa2_probe,
	.remove = rte_dpaa2_remove,
};

/* Register the driver with the DPAA2 bus under the name "net_dpaa2" */
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);