1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_cycles.h>
17 #include <rte_kvargs.h>
20 #include <dpaa2_pmd_logs.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_mempool.h>
24 #include "../dpaa2_ethdev.h"
27 dpaa2_distset_to_dpkg_profile_cfg(
28 uint64_t req_dist_set,
29 struct dpkg_profile_cfg *kg_cfg);
/* PMD-specific API: program a single raw-data hash extract so Rx traffic of
 * traffic class 0 is hash-distributed over all configured Rx queues.
 * NOTE(review): this chunk is line-gapped — the return type, the remaining
 * parameters (presumably 'offset' and 'size', used below) and the error-path
 * return statements are not visible here; comments only describe visible code.
 */
32 rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
/* Resolve the device and its private MC I/O portal handle. */
36 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
37 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
38 struct fsl_mc_io *dpni = priv->hw;
39 struct dpni_rx_tc_dist_cfg tc_cfg;
40 struct dpkg_profile_cfg kg_cfg;
/* Distribution is applied to traffic class 0 only. */
42 int ret, tc_index = 0;
/* Validate the port id (the early-return line is not visible in this view). */
44 if (!rte_eth_dev_is_valid_port(port_id)) {
45 DPAA2_PMD_WARN("Invalid port id %u", port_id);
/* This API is only meaningful on ports bound to the dpaa2 net PMD. */
49 if (strcmp(eth_dev->device->driver->name,
50 RTE_STR(NET_DPAA2_PMD_DRIVER_NAME))) {
51 DPAA2_PMD_WARN("Not a valid dpaa2 port");
/* DMA-able scratch buffer holding the serialized key-generation profile
 * (zeroed by rte_zmalloc). */
55 p_params = rte_zmalloc(
56 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
58 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
/* Single extract taken from raw packet data at the caller-supplied
 * offset/size (parameters not visible in this chunk — TODO confirm). */
62 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
63 kg_cfg.extracts[0].extract.from_data.offset = offset;
64 kg_cfg.extracts[0].extract.from_data.size = size;
65 kg_cfg.extracts[0].num_of_byte_masks = 0;
66 kg_cfg.num_extracts = 1;
/* Pack the profile into the hardware key-config layout at p_params. */
68 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
70 DPAA2_PMD_ERR("Unable to prepare extract parameters");
/* Hash-distribute across every configured Rx queue; key config is passed
 * to the MC by IOVA. */
75 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
76 tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
77 tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
78 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
80 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
85 "Setting distribution for Rx failed with err: %d",
/* Configure RSS hash distribution for one traffic class from an
 * RTE_ETH_RSS_* bitmask (req_dist_set).
 * NOTE(review): chunk is line-gapped — the return type, error-path returns
 * and closing braces are not visible; comments only describe visible code.
 */
94 dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
95 uint64_t req_dist_set, int tc_index)
97 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
98 struct fsl_mc_io *dpni = priv->hw;
99 struct dpni_rx_dist_cfg tc_cfg;
100 struct dpkg_profile_cfg kg_cfg;
102 int ret, tc_dist_queues;
104 /*TC distribution size is set with dist_queues or
105 * nb_rx_queues % dist_queues in order of TC priority index.
106 * Calculating dist size for this tc_index:-
/* Queues remaining for this TC after earlier TCs took priv->dist_queues
 * each; <= 0 means this TC gets no distribution at all. */
108 tc_dist_queues = eth_dev->data->nb_rx_queues -
109 tc_index * priv->dist_queues;
110 if (tc_dist_queues <= 0) {
111 DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
/* Cap at the per-TC maximum supported by this device. */
115 if (tc_dist_queues > priv->dist_queues)
116 tc_dist_queues = priv->dist_queues;
/* DMA-able buffer for the serialized key-generation profile (explicitly
 * zeroed below, hence plain rte_malloc is sufficient). */
118 p_params = rte_malloc(
119 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
121 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
125 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
126 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
/* Translate the RSS bitmask into a dpkg extract profile. */
128 ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
130 DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
/* Enable hash distribution over tc_dist_queues queues of this TC. */
136 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
137 tc_cfg.dist_size = tc_dist_queues;
138 tc_cfg.enable = true;
139 tc_cfg.tc = tc_index;
/* Pack the profile into the hardware key-config layout at p_params. */
141 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
143 DPAA2_PMD_ERR("Unable to prepare extract parameters");
148 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
152 "Setting distribution for Rx failed with err: %d",
/* Disable Rx hash distribution on one traffic class: an empty key profile
 * (num_extracts == 0) with dist_size 0 is programmed into the DPNI.
 * NOTE(review): chunk is line-gapped — the tc_index parameter line, error
 * returns and closing braces are not visible; comments describe visible code.
 */
160 int dpaa2_remove_flow_dist(
161 struct rte_eth_dev *eth_dev,
164 struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
165 struct fsl_mc_io *dpni = priv->hw;
166 struct dpni_rx_dist_cfg tc_cfg;
167 struct dpkg_profile_cfg kg_cfg;
/* DMA-able buffer for the (empty) serialized key profile. */
171 p_params = rte_malloc(
172 NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
174 DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
/* dist_size 0 with enable=true effectively removes distribution for
 * this TC (tc field set from tc_index — parameter not visible here). */
178 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
179 tc_cfg.dist_size = 0;
180 tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
181 tc_cfg.enable = true;
182 tc_cfg.tc = tc_index;
/* Serialize an empty extract profile. */
184 memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
185 kg_cfg.num_extracts = 0;
186 ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
188 DPAA2_PMD_ERR("Unable to prepare extract parameters");
193 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
198 "Setting distribution for Rx failed with err: %d",
/* Translate an RTE_ETH_RSS_* request bitmask into a DPAA2 key-generation
 * profile: one dpkg header extract per hash-relevant packet field.
 * NOTE(review): chunk is line-gapped — per-case 'configured' guards, the
 * NET_PROT_*/NH_FLD_* constants, i increments, break statements and the
 * default-case handling are partly missing from this view; comments only
 * describe what the visible lines establish.
 */
204 dpaa2_distset_to_dpkg_profile_cfg(
205 uint64_t req_dist_set,
206 struct dpkg_profile_cfg *kg_cfg)
/* 'i' indexes the next free extract slot; one *_configured flag per header
 * class so a header is extracted once even when several RSS bits map to it. */
208 uint32_t loop = 0, i = 0;
209 uint64_t dist_field = 0;
210 int l2_configured = 0, l3_configured = 0;
211 int l4_configured = 0, sctp_configured = 0;
212 int mpls_configured = 0;
213 int vlan_configured = 0;
214 int esp_configured = 0;
215 int ah_configured = 0;
216 int pppoe_configured = 0;
218 memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
/* Walk the request one bit at a time; 'loop' is the current bit position,
 * dist_field the corresponding single-bit RSS flag. */
219 while (req_dist_set) {
220 if (req_dist_set % 2 != 0) {
221 dist_field = 1ULL << loop;
222 switch (dist_field) {
/* L2 (Ethernet) header extract. */
223 case RTE_ETH_RSS_L2_PAYLOAD:
224 case RTE_ETH_RSS_ETH:
229 kg_cfg->extracts[i].extract.from_hdr.prot =
231 kg_cfg->extracts[i].extract.from_hdr.field =
233 kg_cfg->extracts[i].type =
234 DPKG_EXTRACT_FROM_HDR;
235 kg_cfg->extracts[i].extract.from_hdr.type =
/* PPPoE header extract (guarded so it is added only once). */
240 case RTE_ETH_RSS_PPPOE:
241 if (pppoe_configured)
243 kg_cfg->extracts[i].extract.from_hdr.prot =
245 kg_cfg->extracts[i].extract.from_hdr.field =
247 kg_cfg->extracts[i].type =
248 DPKG_EXTRACT_FROM_HDR;
249 kg_cfg->extracts[i].extract.from_hdr.type =
/* IPsec ESP: hash on the SPI field. */
254 case RTE_ETH_RSS_ESP:
259 kg_cfg->extracts[i].extract.from_hdr.prot =
261 kg_cfg->extracts[i].extract.from_hdr.field =
262 NH_FLD_IPSEC_ESP_SPI;
263 kg_cfg->extracts[i].type =
264 DPKG_EXTRACT_FROM_HDR;
265 kg_cfg->extracts[i].extract.from_hdr.type =
/* Presumably the IPsec AH case (ah_configured) — the case label is not
 * visible in this chunk; TODO confirm against the full file. */
275 kg_cfg->extracts[i].extract.from_hdr.prot =
277 kg_cfg->extracts[i].extract.from_hdr.field =
279 kg_cfg->extracts[i].type =
280 DPKG_EXTRACT_FROM_HDR;
281 kg_cfg->extracts[i].extract.from_hdr.type =
/* VLAN (C-VLAN/S-VLAN) header extract. */
286 case RTE_ETH_RSS_C_VLAN:
287 case RTE_ETH_RSS_S_VLAN:
292 kg_cfg->extracts[i].extract.from_hdr.prot =
294 kg_cfg->extracts[i].extract.from_hdr.field =
296 kg_cfg->extracts[i].type =
297 DPKG_EXTRACT_FROM_HDR;
298 kg_cfg->extracts[i].extract.from_hdr.type =
/* MPLS: three consecutive header extracts are emitted — presumably one
 * per MPLS stack entry; TODO confirm the NH_FLD_* constants. */
303 case RTE_ETH_RSS_MPLS:
309 kg_cfg->extracts[i].extract.from_hdr.prot =
311 kg_cfg->extracts[i].extract.from_hdr.field =
313 kg_cfg->extracts[i].type =
314 DPKG_EXTRACT_FROM_HDR;
315 kg_cfg->extracts[i].extract.from_hdr.type =
319 kg_cfg->extracts[i].extract.from_hdr.prot =
321 kg_cfg->extracts[i].extract.from_hdr.field =
323 kg_cfg->extracts[i].type =
324 DPKG_EXTRACT_FROM_HDR;
325 kg_cfg->extracts[i].extract.from_hdr.type =
329 kg_cfg->extracts[i].extract.from_hdr.prot =
331 kg_cfg->extracts[i].extract.from_hdr.field =
333 kg_cfg->extracts[i].type =
334 DPKG_EXTRACT_FROM_HDR;
335 kg_cfg->extracts[i].extract.from_hdr.type =
/* L3 (IPv4/IPv6): multiple extracts follow — presumably source address,
 * destination address and next-proto; constants not visible here. */
340 case RTE_ETH_RSS_IPV4:
341 case RTE_ETH_RSS_FRAG_IPV4:
342 case RTE_ETH_RSS_NONFRAG_IPV4_OTHER:
343 case RTE_ETH_RSS_IPV6:
344 case RTE_ETH_RSS_FRAG_IPV6:
345 case RTE_ETH_RSS_NONFRAG_IPV6_OTHER:
346 case RTE_ETH_RSS_IPV6_EX:
352 kg_cfg->extracts[i].extract.from_hdr.prot =
354 kg_cfg->extracts[i].extract.from_hdr.field =
356 kg_cfg->extracts[i].type =
357 DPKG_EXTRACT_FROM_HDR;
358 kg_cfg->extracts[i].extract.from_hdr.type =
362 kg_cfg->extracts[i].extract.from_hdr.prot =
364 kg_cfg->extracts[i].extract.from_hdr.field =
366 kg_cfg->extracts[i].type =
367 DPKG_EXTRACT_FROM_HDR;
368 kg_cfg->extracts[i].extract.from_hdr.type =
372 kg_cfg->extracts[i].extract.from_hdr.prot =
374 kg_cfg->extracts[i].extract.from_hdr.field =
376 kg_cfg->extracts[i].type =
377 DPKG_EXTRACT_FROM_HDR;
378 kg_cfg->extracts[i].extract.from_hdr.type =
380 kg_cfg->num_extracts++;
/* L4 (TCP/UDP): two extracts — source and destination port. */
384 case RTE_ETH_RSS_NONFRAG_IPV4_TCP:
385 case RTE_ETH_RSS_NONFRAG_IPV6_TCP:
386 case RTE_ETH_RSS_NONFRAG_IPV4_UDP:
387 case RTE_ETH_RSS_NONFRAG_IPV6_UDP:
388 case RTE_ETH_RSS_IPV6_TCP_EX:
389 case RTE_ETH_RSS_IPV6_UDP_EX:
395 kg_cfg->extracts[i].extract.from_hdr.prot =
397 kg_cfg->extracts[i].extract.from_hdr.field =
399 kg_cfg->extracts[i].type =
400 DPKG_EXTRACT_FROM_HDR;
401 kg_cfg->extracts[i].extract.from_hdr.type =
405 kg_cfg->extracts[i].extract.from_hdr.prot =
407 kg_cfg->extracts[i].extract.from_hdr.field =
409 kg_cfg->extracts[i].type =
410 DPKG_EXTRACT_FROM_HDR;
411 kg_cfg->extracts[i].extract.from_hdr.type =
/* SCTP: two extracts — source and destination port. */
416 case RTE_ETH_RSS_NONFRAG_IPV4_SCTP:
417 case RTE_ETH_RSS_NONFRAG_IPV6_SCTP:
423 kg_cfg->extracts[i].extract.from_hdr.prot =
425 kg_cfg->extracts[i].extract.from_hdr.field =
426 NH_FLD_SCTP_PORT_SRC;
427 kg_cfg->extracts[i].type =
428 DPKG_EXTRACT_FROM_HDR;
429 kg_cfg->extracts[i].extract.from_hdr.type =
433 kg_cfg->extracts[i].extract.from_hdr.prot =
435 kg_cfg->extracts[i].extract.from_hdr.field =
436 NH_FLD_SCTP_PORT_DST;
437 kg_cfg->extracts[i].type =
438 DPKG_EXTRACT_FROM_HDR;
439 kg_cfg->extracts[i].extract.from_hdr.type =
/* Default case: any other RSS bit is rejected as unsupported. */
446 "unsupported flow dist option 0x%" PRIx64,
/* Advance to the next request bit. */
451 req_dist_set = req_dist_set >> 1;
/* Final extract count is the number of slots filled above. */
454 kg_cfg->num_extracts = i;
/* Attach a buffer-pool list to the DPNI: programs the Rx buffer layout
 * (headroom, annotations, timestamp/parse-result passing) and then binds
 * the first DPBP of the list to the interface.
 * NOTE(review): this chunk is line-gapped and the function continues past
 * the visible range (return statement and closing brace not shown);
 * comments only describe the visible statements.
 */
459 dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
462 /* Function to attach a DPNI with a buffer pool list. Buffer pool list
463 * handle is passed in blist.
466 struct fsl_mc_io *dpni = priv->hw;
467 struct dpni_pools_cfg bpool_cfg;
468 struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
469 struct dpni_buffer_layout layout;
472 /* ... rx buffer layout .
473 * Check alignment for buffer layouts first
/* Headroom rounded up to the hardware layout alignment. */
476 /* ... rx buffer layout ... */
477 tot_size = RTE_PKTMBUF_HEADROOM;
478 tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
/* Enable frame status, parse results, timestamp and private data area
 * in the Rx frame annotation. */
480 memset(&layout, 0, sizeof(struct dpni_buffer_layout));
481 layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
482 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
483 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
484 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
485 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
486 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
488 layout.pass_timestamp = true;
489 layout.pass_frame_status = 1;
490 layout.private_data_size = DPAA2_FD_PTA_SIZE;
491 layout.pass_parser_result = 1;
492 layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
/* Data headroom is what remains after the annotation areas. */
493 layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
494 DPAA2_MBUF_HW_ANNOTATION;
495 retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
496 DPNI_QUEUE_RX, &layout);
498 DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
503 /*Attach buffer pool to the network interface as described by the user*/
/* Single (non-backup) pool; buffer size aligned to the layout boundary. */
504 memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
505 bpool_cfg.num_dpbp = 1;
506 bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
507 bpool_cfg.pools[0].backup_pool = 0;
508 bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
509 DPAA2_PACKET_LAYOUT_ALIGN);
510 bpool_cfg.pools[0].priority_mask = 0;
512 retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
514 DPAA2_PMD_ERR("Error configuring buffer pool on interface."
515 " bpid = %d error code = %d",
516 bpool_cfg.pools[0].dpbp_id, retcode);
/* Remember the attached list for later teardown/lookup. */
520 priv->bp_list = bp_list;