/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 */
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
20 #include <dpaa2_pmd_logs.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_mempool.h>
24 #include "../dpaa2_ethdev.h"
27 dpaa2_distset_to_dpkg_profile_cfg(
28 uint64_t req_dist_set,
29 struct dpkg_profile_cfg *kg_cfg);
32 rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
36 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
37 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
38 struct fsl_mc_io *dpni = priv->hw;
39 struct dpni_rx_tc_dist_cfg tc_cfg;
40 struct dpkg_profile_cfg kg_cfg;
42 int ret, tc_index = 0;
44 p_params = rte_zmalloc(
45 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
47 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
51 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
52 kg_cfg.extracts[0].extract.from_data.offset = offset;
53 kg_cfg.extracts[0].extract.from_data.size = size;
54 kg_cfg.extracts[0].num_of_byte_masks = 0;
55 kg_cfg.num_extracts = 1;
57 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
59 DPAA2_PMD_ERR("Unable to prepare extract parameters");
64 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
65 tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
66 tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
67 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
69 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
74 "Setting distribution for Rx failed with err: %d",
83 dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
84 uint64_t req_dist_set, int tc_index)
86 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
87 struct fsl_mc_io *dpni = priv->hw;
88 struct dpni_rx_dist_cfg tc_cfg;
89 struct dpkg_profile_cfg kg_cfg;
91 int ret, tc_dist_queues;
93 /*TC distribution size is set with dist_queues or
94 * nb_rx_queues % dist_queues in order of TC priority index.
95 * Calculating dist size for this tc_index:-
97 tc_dist_queues = eth_dev->data->nb_rx_queues -
98 tc_index * priv->dist_queues;
99 if (tc_dist_queues <= 0) {
100 DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
104 if (tc_dist_queues > priv->dist_queues)
105 tc_dist_queues = priv->dist_queues;
107 p_params = rte_malloc(
108 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
110 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
114 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
115 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
117 ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
119 DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
125 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
126 tc_cfg.dist_size = tc_dist_queues;
127 tc_cfg.enable = true;
128 tc_cfg.tc = tc_index;
130 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
132 DPAA2_PMD_ERR("Unable to prepare extract parameters");
137 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
141 "Setting distribution for Rx failed with err: %d",
149 int dpaa2_remove_flow_dist(
150 struct rte_eth_dev *eth_dev,
153 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
154 struct fsl_mc_io *dpni = priv->hw;
155 struct dpni_rx_dist_cfg tc_cfg;
156 struct dpkg_profile_cfg kg_cfg;
160 p_params = rte_malloc(
161 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
163 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
167 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
168 tc_cfg.dist_size = 0;
169 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
170 tc_cfg.enable = true;
171 tc_cfg.tc = tc_index;
173 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
174 kg_cfg.num_extracts = 0;
175 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
177 DPAA2_PMD_ERR("Unable to prepare extract parameters");
182 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
187 "Setting distribution for Rx failed with err: %d",
193 dpaa2_distset_to_dpkg_profile_cfg(
194 uint64_t req_dist_set,
195 struct dpkg_profile_cfg *kg_cfg)
197 uint32_t loop = 0, i = 0, dist_field = 0;
198 int l2_configured = 0, l3_configured = 0;
199 int l4_configured = 0, sctp_configured = 0;
201 memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
202 while (req_dist_set) {
203 if (req_dist_set % 2 != 0) {
204 dist_field = 1U << loop;
205 switch (dist_field) {
206 case ETH_RSS_L2_PAYLOAD:
212 kg_cfg->extracts[i].extract.from_hdr.prot =
214 kg_cfg->extracts[i].extract.from_hdr.field =
216 kg_cfg->extracts[i].type =
217 DPKG_EXTRACT_FROM_HDR;
218 kg_cfg->extracts[i].extract.from_hdr.type =
224 case ETH_RSS_FRAG_IPV4:
225 case ETH_RSS_NONFRAG_IPV4_OTHER:
227 case ETH_RSS_FRAG_IPV6:
228 case ETH_RSS_NONFRAG_IPV6_OTHER:
229 case ETH_RSS_IPV6_EX:
235 kg_cfg->extracts[i].extract.from_hdr.prot =
237 kg_cfg->extracts[i].extract.from_hdr.field =
239 kg_cfg->extracts[i].type =
240 DPKG_EXTRACT_FROM_HDR;
241 kg_cfg->extracts[i].extract.from_hdr.type =
245 kg_cfg->extracts[i].extract.from_hdr.prot =
247 kg_cfg->extracts[i].extract.from_hdr.field =
249 kg_cfg->extracts[i].type =
250 DPKG_EXTRACT_FROM_HDR;
251 kg_cfg->extracts[i].extract.from_hdr.type =
255 kg_cfg->extracts[i].extract.from_hdr.prot =
257 kg_cfg->extracts[i].extract.from_hdr.field =
259 kg_cfg->extracts[i].type =
260 DPKG_EXTRACT_FROM_HDR;
261 kg_cfg->extracts[i].extract.from_hdr.type =
263 kg_cfg->num_extracts++;
267 case ETH_RSS_NONFRAG_IPV4_TCP:
268 case ETH_RSS_NONFRAG_IPV6_TCP:
269 case ETH_RSS_NONFRAG_IPV4_UDP:
270 case ETH_RSS_NONFRAG_IPV6_UDP:
271 case ETH_RSS_IPV6_TCP_EX:
272 case ETH_RSS_IPV6_UDP_EX:
278 kg_cfg->extracts[i].extract.from_hdr.prot =
280 kg_cfg->extracts[i].extract.from_hdr.field =
282 kg_cfg->extracts[i].type =
283 DPKG_EXTRACT_FROM_HDR;
284 kg_cfg->extracts[i].extract.from_hdr.type =
288 kg_cfg->extracts[i].extract.from_hdr.prot =
290 kg_cfg->extracts[i].extract.from_hdr.field =
292 kg_cfg->extracts[i].type =
293 DPKG_EXTRACT_FROM_HDR;
294 kg_cfg->extracts[i].extract.from_hdr.type =
299 case ETH_RSS_NONFRAG_IPV4_SCTP:
300 case ETH_RSS_NONFRAG_IPV6_SCTP:
306 kg_cfg->extracts[i].extract.from_hdr.prot =
308 kg_cfg->extracts[i].extract.from_hdr.field =
309 NH_FLD_SCTP_PORT_SRC;
310 kg_cfg->extracts[i].type =
311 DPKG_EXTRACT_FROM_HDR;
312 kg_cfg->extracts[i].extract.from_hdr.type =
316 kg_cfg->extracts[i].extract.from_hdr.prot =
318 kg_cfg->extracts[i].extract.from_hdr.field =
319 NH_FLD_SCTP_PORT_DST;
320 kg_cfg->extracts[i].type =
321 DPKG_EXTRACT_FROM_HDR;
322 kg_cfg->extracts[i].extract.from_hdr.type =
329 "Unsupported flow dist option %x",
334 req_dist_set = req_dist_set >> 1;
337 kg_cfg->num_extracts = i;
342 dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
345 /* Function to attach a DPNI with a buffer pool list. Buffer pool list
346 * handle is passed in blist.
349 struct fsl_mc_io *dpni = priv->hw;
350 struct dpni_pools_cfg bpool_cfg;
351 struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
352 struct dpni_buffer_layout layout;
355 /* ... rx buffer layout .
356 * Check alignment for buffer layouts first
359 /* ... rx buffer layout ... */
360 tot_size = RTE_PKTMBUF_HEADROOM;
361 tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
363 memset(&layout, 0, sizeof(struct dpni_buffer_layout));
364 layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
365 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
366 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
367 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
368 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
369 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
371 layout.pass_timestamp = true;
372 layout.pass_frame_status = 1;
373 layout.private_data_size = DPAA2_FD_PTA_SIZE;
374 layout.pass_parser_result = 1;
375 layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
376 layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
377 DPAA2_MBUF_HW_ANNOTATION;
378 retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
379 DPNI_QUEUE_RX, &layout);
381 DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
386 /*Attach buffer pool to the network interface as described by the user*/
387 memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
388 bpool_cfg.num_dpbp = 1;
389 bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
390 bpool_cfg.pools[0].backup_pool = 0;
391 bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
392 DPAA2_PACKET_LAYOUT_ALIGN);
393 bpool_cfg.pools[0].priority_mask = 0;
395 retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
397 DPAA2_PMD_ERR("Error configuring buffer pool on interface."
398 " bpid = %d error code = %d",
399 bpool_cfg.pools[0].dpbp_id, retcode);
403 priv->bp_list = bp_list;