drivers: add newline in dpaa2 logs
[dpdk.git] drivers/net/dpaa2/base/dpaa2_hw_dpni.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "../dpaa2_ethdev.h"

static void
dpaa2_distset_to_dpkg_profile_cfg(
		uint32_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg);

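/*
 * Enable hash-based Rx flow distribution (RSS) on traffic class 0 of the
 * DPNI, spreading flows across all configured Rx queues according to the
 * fields selected in req_dist_set (an rte_eth RSS offload mask).
 */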
int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		      uint32_t req_dist_set)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_index = 0;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		RTE_LOG(ERR, PMD, "Memory unavailable\n");
		return -ENOMEM;
	}
	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));

	dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;

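	/*
	 * Serialise the extract configuration into the DMA-able p_params
	 * buffer; the MC firmware reads it through tc_cfg.key_cfg_iova.
	 */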
	ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
				  &tc_cfg);
	rte_free(p_params);
	if (ret) {
		RTE_LOG(ERR, PMD,
			"Setting distribution for Rx failed with err: %d\n",
			ret);
		return ret;
	}

	return 0;
}

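/*
 * Disable Rx flow distribution on the given traffic class: the distribution
 * size is reset to zero and the mode to DPNI_DIST_MODE_NONE, so all traffic
 * of that class lands on a single queue.
 */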
int dpaa2_remove_flow_dist(
	struct rte_eth_dev *eth_dev,
	uint8_t tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		RTE_LOG(ERR, PMD, "Memory unavailable\n");
		return -ENOMEM;
	}
	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));

	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = 0;
	tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;

	ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
				  &tc_cfg);
	rte_free(p_params);
	if (ret) {
		RTE_LOG(ERR, PMD,
			"Setting distribution for Rx failed with err: %d\n",
			ret);
		return ret;
	}
	return ret;
}

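/*
 * Translate an rte_eth RSS offload mask into a DPAA2 key generation profile:
 * walk the mask bit by bit and add one header-field extract per selected
 * field. Each of the L2, L3, L4 and SCTP groups is added at most once, even
 * if several of its RSS bits are set.
 */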
static void
dpaa2_distset_to_dpkg_profile_cfg(
		uint32_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg)
{
	uint32_t loop = 0, i = 0, dist_field = 0;
	int l2_configured = 0, l3_configured = 0;
	int l4_configured = 0, sctp_configured = 0;

	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	while (req_dist_set) {
		if (req_dist_set % 2 != 0) {
			dist_field = 1U << loop;
			switch (dist_field) {
			case ETH_RSS_L2_PAYLOAD:
				/* Hash on the Ethernet type field */
				if (l2_configured)
					break;
				l2_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_ETH;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_ETH_TYPE;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_IPV4:
			case ETH_RSS_FRAG_IPV4:
			case ETH_RSS_NONFRAG_IPV4_OTHER:
			case ETH_RSS_IPV6:
			case ETH_RSS_FRAG_IPV6:
			case ETH_RSS_NONFRAG_IPV6_OTHER:
			case ETH_RSS_IPV6_EX:
				/* Hash on IP source, destination and protocol */
				if (l3_configured)
					break;
				l3_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_PROTO;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_TCP:
			case ETH_RSS_NONFRAG_IPV6_TCP:
			case ETH_RSS_NONFRAG_IPV4_UDP:
			case ETH_RSS_NONFRAG_IPV6_UDP:
			case ETH_RSS_IPV6_TCP_EX:
			case ETH_RSS_IPV6_UDP_EX:
				/* Hash on L4 source and destination ports */
				if (l4_configured)
					break;
				l4_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_SCTP:
			case ETH_RSS_NONFRAG_IPV6_SCTP:
				/* Hash on SCTP source and destination ports */
				if (sctp_configured)
					break;
				sctp_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			default:
				PMD_DRV_LOG(WARNING,
					    "Bad flow distribution option %x\n",
					    dist_field);
			}
		}
		req_dist_set = req_dist_set >> 1;
		loop++;
	}
	kg_cfg->num_extracts = i;
}

int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
		     void *blist)
{
	/* Function to attach a DPNI with a buffer pool list. Buffer pool list
	 * handle is passed in blist.
	 */
	int32_t retcode;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_pools_cfg bpool_cfg;
	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
	struct dpni_buffer_layout layout;
	int tot_size;

	/* Rx buffer layout: check alignment for the buffer layout first. */
	tot_size = RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_frame_status = 1;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
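	/*
	 * The data headroom seen by hardware is the (aligned) mbuf headroom
	 * minus the private annotation (PTA) and hardware annotation areas
	 * reserved ahead of the packet data.
	 */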
	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
				DPAA2_MBUF_HW_ANNOTATION;
	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
					 DPNI_QUEUE_RX, &layout);
	if (retcode) {
		PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
			     retcode);
		return retcode;
	}

	/* Attach the buffer pool to the network interface as described by
	 * the user.
	 */
	bpool_cfg.num_dpbp = 1;
	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
	bpool_cfg.pools[0].backup_pool = 0;
	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
						DPAA2_PACKET_LAYOUT_ALIGN);

	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Error in attaching the buffer pool list bpid = %d Error code = %d\n",
			     bpool_cfg.pools[0].dpbp_id, retcode);
		return retcode;
	}

	priv->bp_list = bp_list;
	return 0;
}