1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
20 #include <dpaa2_pmd_logs.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_mempool.h>
24 #include "../dpaa2_ethdev.h"
/*
 * Forward declaration: translate an RSS hash-field bitmask (req_dist_set)
 * into a DPAA2 key-generation (dpkg) extraction profile.  Defined later in
 * this file.  NOTE(review): the return type line is not visible in this
 * chunk -- presumably int, matching the definition's error logging; confirm.
 */
27 dpaa2_distset_to_dpkg_profile_cfg(
28 uint64_t req_dist_set,
29 struct dpkg_profile_cfg *kg_cfg);
/*
 * Public PMD API: program a custom RX hash distribution key for @port_id,
 * built from a single raw extract of @size bytes at byte @offset in the
 * packet data (DPKG_EXTRACT_FROM_DATA), then apply it to traffic class 0
 * with hash-mode distribution across all configured RX queues.
 *
 * NOTE(review): this chunk is truncated -- the declarations of the
 * offset/size parameters, the error-path returns after each warning/error
 * log, the key-buffer free on the exit paths, and the closing braces are
 * not visible here.  Comments below only describe what the visible lines do.
 */
32 rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
36 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
37 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
38 struct fsl_mc_io *dpni = priv->hw;
39 struct dpni_rx_tc_dist_cfg tc_cfg;
40 struct dpkg_profile_cfg kg_cfg;
/* Distribution is applied to traffic class 0 only. */
42 int ret, tc_index = 0;
/* Validate the port before touching rte_eth_devices[] state. */
44 if (!rte_eth_dev_is_valid_port(port_id)) {
45 DPAA2_PMD_WARN("Invalid port id %u", port_id);
/* Reject ports not owned by this (dpaa2) driver. */
49 if (strcmp(eth_dev->device->driver->name,
50 RTE_STR(NET_DPAA2_PMD_DRIVER_NAME))) {
51 DPAA2_PMD_WARN("Not a valid dpaa2 port");
/*
 * DMA-able scratch buffer for the serialized key configuration;
 * rte_zmalloc gives zeroed, cache-line aligned memory.
 */
55 p_params = rte_zmalloc(
56 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
58 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
/* One raw-data extract: @size bytes starting at byte @offset. */
62 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
63 kg_cfg.extracts[0].extract.from_data.offset = offset;
64 kg_cfg.extracts[0].extract.from_data.size = size;
65 kg_cfg.extracts[0].num_of_byte_masks = 0;
66 kg_cfg.num_extracts = 1;
/* Serialize the extract profile into the hardware key-config format. */
68 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
70 DPAA2_PMD_ERR("Unable to prepare extract parameters");
/* Hash-distribute across every configured RX queue of the port. */
75 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
76 tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
77 tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
78 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
80 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
85 "Setting distribution for Rx failed with err: %d",
/*
 * Configure RSS flow distribution for one traffic class (@tc_index) of
 * @eth_dev.  @req_dist_set is the ETH_RSS_* bitmask of packet fields to
 * hash on; it is converted into a dpkg extraction profile and pushed to
 * hardware via dpni_set_rx_hash_dist().
 *
 * NOTE(review): chunk is truncated -- the function's return type line,
 * the early returns after the error logs, the kg_cfg memset (if any), and
 * the p_params free/closing braces are not visible here.
 */
94 dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
95 uint64_t req_dist_set, int tc_index)
97 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
98 struct fsl_mc_io *dpni = priv->hw;
99 struct dpni_rx_dist_cfg tc_cfg;
100 struct dpkg_profile_cfg kg_cfg;
102 int ret, tc_dist_queues;
104 /*TC distribution size is set with dist_queues or
105 * nb_rx_queues % dist_queues in order of TC priority index.
106 * Calculating dist size for this tc_index:-
/*
 * Queues remaining for this TC after the earlier (higher-priority)
 * TCs each consumed priv->dist_queues queues.
 */
108 tc_dist_queues = eth_dev->data->nb_rx_queues -
109 tc_index * priv->dist_queues;
110 if (tc_dist_queues <= 0) {
/* Nothing left for this TC: informational, not an error. */
111 DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
/* Cap each TC at the per-TC distribution limit. */
115 if (tc_dist_queues > priv->dist_queues)
116 tc_dist_queues = priv->dist_queues;
/* DMA-able buffer for the serialized key config (memset below zeroes it). */
118 p_params = rte_malloc(
119 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
121 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
125 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
126 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
/* Map the requested ETH_RSS_* field set onto dpkg header extracts. */
128 ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
130 DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
136 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
137 tc_cfg.dist_size = tc_dist_queues;
138 tc_cfg.enable = true;
139 tc_cfg.tc = tc_index;
/* Serialize the profile into the buffer the hardware will read. */
141 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
143 DPAA2_PMD_ERR("Unable to prepare extract parameters");
148 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
152 "Setting distribution for Rx failed with err: %d",
/*
 * Disable RSS flow distribution on traffic class @tc_index of @eth_dev by
 * programming an empty key (num_extracts = 0) with dist_size = 0.
 *
 * NOTE(review): chunk is truncated -- the tc_index parameter line, local
 * declarations of ret/p_params, error-path returns, the p_params free and
 * closing braces are not visible here.
 */
160 int dpaa2_remove_flow_dist(
161 struct rte_eth_dev *eth_dev,
164 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
165 struct fsl_mc_io *dpni = priv->hw;
166 struct dpni_rx_dist_cfg tc_cfg;
167 struct dpkg_profile_cfg kg_cfg;
/* DMA-able buffer for the (empty) serialized key configuration. */
171 p_params = rte_malloc(
172 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
174 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
/* dist_size = 0 with enable = true effectively removes distribution. */
178 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
179 tc_cfg.dist_size = 0;
180 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
181 tc_cfg.enable = true;
182 tc_cfg.tc = tc_index;
184 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
/* An empty profile: no fields extracted for hashing. */
185 kg_cfg.num_extracts = 0;
186 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
188 DPAA2_PMD_ERR("Unable to prepare extract parameters");
193 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
198 "Setting distribution for Rx failed with err: %d",
/*
 * Translate an ETH_RSS_* bitmask (@req_dist_set) into a dpkg key-generation
 * profile (@kg_cfg): for each set bit, append the corresponding protocol
 * header-field extracts (L2, VLAN-ish, L3 src/dst/proto, L4 src/dst port,
 * SCTP ports, MPLS -- per the *_configured guard flags) to
 * kg_cfg->extracts[].  The bitmask is consumed LSB-first.
 *
 * NOTE(review): this chunk is truncated -- the return type line, the
 * *_configured guard checks, the prot/field constant values on most
 * assignments, several `i++`/`num_extracts` updates, `break` statements and
 * closing braces are not visible.  Comments below are intentionally limited
 * to what the visible lines establish.
 */
204 dpaa2_distset_to_dpkg_profile_cfg(
205 uint64_t req_dist_set,
206 struct dpkg_profile_cfg *kg_cfg)
/* loop = bit position being tested; i = next free extract slot. */
208 uint32_t loop = 0, i = 0;
209 uint64_t dist_field = 0;
/* One-shot guards so each protocol family is configured at most once. */
210 int l2_configured = 0, l3_configured = 0;
211 int l4_configured = 0, sctp_configured = 0;
212 int mpls_configured = 0;
214 memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
/* Walk req_dist_set LSB-first; each iteration tests one bit. */
215 while (req_dist_set) {
216 if (req_dist_set % 2 != 0) {
/* Reconstruct the single-bit ETH_RSS_* flag for this position. */
217 dist_field = 1ULL << loop;
218 switch (dist_field) {
219 case ETH_RSS_L2_PAYLOAD:
/* L2 extracts (exact prot/field constants not visible in this chunk). */
225 kg_cfg->extracts[i].extract.from_hdr.prot =
227 kg_cfg->extracts[i].extract.from_hdr.field =
229 kg_cfg->extracts[i].type =
230 DPKG_EXTRACT_FROM_HDR;
231 kg_cfg->extracts[i].extract.from_hdr.type =
242 kg_cfg->extracts[i].extract.from_hdr.prot =
244 kg_cfg->extracts[i].extract.from_hdr.field =
246 kg_cfg->extracts[i].type =
247 DPKG_EXTRACT_FROM_HDR;
248 kg_cfg->extracts[i].extract.from_hdr.type =
252 kg_cfg->extracts[i].extract.from_hdr.prot =
254 kg_cfg->extracts[i].extract.from_hdr.field =
256 kg_cfg->extracts[i].type =
257 DPKG_EXTRACT_FROM_HDR;
258 kg_cfg->extracts[i].extract.from_hdr.type =
262 kg_cfg->extracts[i].extract.from_hdr.prot =
264 kg_cfg->extracts[i].extract.from_hdr.field =
266 kg_cfg->extracts[i].type =
267 DPKG_EXTRACT_FROM_HDR;
268 kg_cfg->extracts[i].extract.from_hdr.type =
/* IP (v4/v6, frag and non-frag) hash fields share one extract set. */
274 case ETH_RSS_FRAG_IPV4:
275 case ETH_RSS_NONFRAG_IPV4_OTHER:
277 case ETH_RSS_FRAG_IPV6:
278 case ETH_RSS_NONFRAG_IPV6_OTHER:
279 case ETH_RSS_IPV6_EX:
285 kg_cfg->extracts[i].extract.from_hdr.prot =
287 kg_cfg->extracts[i].extract.from_hdr.field =
289 kg_cfg->extracts[i].type =
290 DPKG_EXTRACT_FROM_HDR;
291 kg_cfg->extracts[i].extract.from_hdr.type =
295 kg_cfg->extracts[i].extract.from_hdr.prot =
297 kg_cfg->extracts[i].extract.from_hdr.field =
299 kg_cfg->extracts[i].type =
300 DPKG_EXTRACT_FROM_HDR;
301 kg_cfg->extracts[i].extract.from_hdr.type =
305 kg_cfg->extracts[i].extract.from_hdr.prot =
307 kg_cfg->extracts[i].extract.from_hdr.field =
309 kg_cfg->extracts[i].type =
310 DPKG_EXTRACT_FROM_HDR;
311 kg_cfg->extracts[i].extract.from_hdr.type =
313 kg_cfg->num_extracts++;
/* TCP/UDP cases share generic L4 source/destination port extracts. */
317 case ETH_RSS_NONFRAG_IPV4_TCP:
318 case ETH_RSS_NONFRAG_IPV6_TCP:
319 case ETH_RSS_NONFRAG_IPV4_UDP:
320 case ETH_RSS_NONFRAG_IPV6_UDP:
321 case ETH_RSS_IPV6_TCP_EX:
322 case ETH_RSS_IPV6_UDP_EX:
328 kg_cfg->extracts[i].extract.from_hdr.prot =
330 kg_cfg->extracts[i].extract.from_hdr.field =
332 kg_cfg->extracts[i].type =
333 DPKG_EXTRACT_FROM_HDR;
334 kg_cfg->extracts[i].extract.from_hdr.type =
338 kg_cfg->extracts[i].extract.from_hdr.prot =
340 kg_cfg->extracts[i].extract.from_hdr.field =
342 kg_cfg->extracts[i].type =
343 DPKG_EXTRACT_FROM_HDR;
344 kg_cfg->extracts[i].extract.from_hdr.type =
/* SCTP gets its own port extracts (distinct field constants). */
349 case ETH_RSS_NONFRAG_IPV4_SCTP:
350 case ETH_RSS_NONFRAG_IPV6_SCTP:
356 kg_cfg->extracts[i].extract.from_hdr.prot =
358 kg_cfg->extracts[i].extract.from_hdr.field =
359 NH_FLD_SCTP_PORT_SRC;
360 kg_cfg->extracts[i].type =
361 DPKG_EXTRACT_FROM_HDR;
362 kg_cfg->extracts[i].extract.from_hdr.type =
366 kg_cfg->extracts[i].extract.from_hdr.prot =
368 kg_cfg->extracts[i].extract.from_hdr.field =
369 NH_FLD_SCTP_PORT_DST;
370 kg_cfg->extracts[i].type =
371 DPKG_EXTRACT_FROM_HDR;
372 kg_cfg->extracts[i].extract.from_hdr.type =
/* Unknown/unsupported RSS bit: reported as an error to the caller. */
379 "unsupported flow dist option 0x%" PRIx64,
/* Advance to the next bit of the request mask. */
384 req_dist_set = req_dist_set >> 1;
/* i counts the extracts actually filled in above. */
387 kg_cfg->num_extracts = i;
/*
 * Attach the DPNI owned by @priv to the buffer-pool list passed (opaquely)
 * in @blist: first program the RX buffer layout (headroom, annotation,
 * timestamp/parse-result passing), then bind the first DPBP of the list as
 * the interface's buffer pool, and finally remember the list in
 * priv->bp_list.
 *
 * NOTE(review): chunk is truncated -- the blist parameter line, the
 * tot_size/retcode declarations, the error-path returns and the final
 * return/closing brace are not visible here.
 */
392 dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
395 /* Function to attach a DPNI with a buffer pool list. Buffer pool list
396 * handle is passed in blist.
399 struct fsl_mc_io *dpni = priv->hw;
400 struct dpni_pools_cfg bpool_cfg;
/* blist is an opaque handle; the real type is dpaa2_bp_list. */
401 struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
402 struct dpni_buffer_layout layout;
405 /* ... rx buffer layout .
406 * Check alignment for buffer layouts first
/* Headroom must be aligned to the hardware packet-layout boundary. */
409 /* ... rx buffer layout ... */
410 tot_size = RTE_PKTMBUF_HEADROOM;
411 tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
413 memset(&layout, 0, sizeof(struct dpni_buffer_layout));
/* Select which layout fields below are actually applied by the MC. */
414 layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
415 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
416 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
417 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
418 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
419 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
421 layout.pass_timestamp = true;
422 layout.pass_frame_status = 1;
423 layout.private_data_size = DPAA2_FD_PTA_SIZE;
424 layout.pass_parser_result = 1;
425 layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
/* Headroom left after pass-through annotation + HW annotation areas. */
426 layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
427 DPAA2_MBUF_HW_ANNOTATION;
428 retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
429 DPNI_QUEUE_RX, &layout);
431 DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
436 /*Attach buffer pool to the network interface as described by the user*/
437 memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
/* Single (primary) pool: first DPBP node of the list, no backup pool. */
438 bpool_cfg.num_dpbp = 1;
439 bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
440 bpool_cfg.pools[0].backup_pool = 0;
/* Buffer size rounded up to the hardware layout alignment. */
441 bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
442 DPAA2_PACKET_LAYOUT_ALIGN);
443 bpool_cfg.pools[0].priority_mask = 0;
445 retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
447 DPAA2_PMD_ERR("Error configuring buffer pool on interface."
448 " bpid = %d error code = %d",
449 bpool_cfg.pools[0].dpbp_id, retcode);
/* Remember the attached list so later teardown/queries can find it. */
453 priv->bp_list = bp_list;