/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <errno.h>
#include <netinet/in.h>

#include <rte_malloc.h>
#include <rte_common.h>

#include <mlx5_common.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"
16 mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
20 for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
21 if (priv->steer.rss[i].flow) {
22 claim_zero(mlx5_glue->dv_destroy_flow
23 (priv->steer.rss[i].flow));
24 priv->steer.rss[i].flow = NULL;
26 if (priv->steer.rss[i].tir_action) {
27 claim_zero(mlx5_glue->destroy_flow_action
28 (priv->steer.rss[i].tir_action));
29 priv->steer.rss[i].tir_action = NULL;
31 if (priv->steer.rss[i].tir) {
32 claim_zero(mlx5_devx_cmd_destroy
33 (priv->steer.rss[i].tir));
34 priv->steer.rss[i].tir = NULL;
36 if (priv->steer.rss[i].matcher) {
37 claim_zero(mlx5_glue->dv_destroy_flow_matcher
38 (priv->steer.rss[i].matcher));
39 priv->steer.rss[i].matcher = NULL;
45 mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
47 mlx5_vdpa_rss_flows_destroy(priv);
48 if (priv->steer.tbl) {
49 claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
50 priv->steer.tbl = NULL;
52 if (priv->steer.domain) {
53 claim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));
54 priv->steer.domain = NULL;
56 if (priv->steer.rqt) {
57 claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
58 priv->steer.rqt = NULL;
62 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
64 * Return the number of queues configured to the table on success, otherwise
68 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
71 uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
72 1 << priv->log_max_rqt_size);
73 struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
80 DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
84 for (i = 0; i < priv->nr_virtqs; i++) {
85 if (is_virtq_recvq(i, priv->nr_virtqs) &&
86 priv->virtqs[i].enable && priv->virtqs[i].virtq) {
87 attr->rq_list[k] = priv->virtqs[i].virtq->id;
92 /* No enabled RQ to configure for RSS. */
95 for (j = 0; k != rqt_n; ++k, ++j)
96 attr->rq_list[k] = attr->rq_list[j];
97 attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
98 attr->rqt_max_size = rqt_n;
99 attr->rqt_actual_size = rqt_n;
100 if (!priv->steer.rqt) {
101 priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);
102 if (!priv->steer.rqt) {
103 DRV_LOG(ERR, "Failed to create RQT.");
107 ret = mlx5_devx_cmd_modify_rqt(priv->steer.rqt, attr);
109 DRV_LOG(ERR, "Failed to modify RQT.");
112 return ret ? -1 : num;
115 static int __rte_unused
116 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
118 #ifdef HAVE_MLX5DV_DR
119 struct mlx5_devx_tir_attr tir_att = {
120 .disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
121 .rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
122 .transport_domain = priv->td->id,
123 .indirect_table = priv->steer.rqt->id,
124 .rx_hash_symmetric = 1,
125 .rx_hash_toeplitz_key = { 0x2c, 0xc6, 0x81, 0xd1,
126 0x5b, 0xdb, 0xf4, 0xf7,
127 0xfc, 0xa2, 0x83, 0x19,
128 0xdb, 0x1a, 0x3e, 0x94,
129 0x6b, 0x9e, 0x38, 0xd9,
130 0x2c, 0x9c, 0x03, 0xd1,
131 0xad, 0x99, 0x44, 0xa7,
132 0xd9, 0x56, 0x3d, 0x59,
133 0x06, 0x3c, 0x25, 0xf3,
134 0xfc, 0x1f, 0xdc, 0x2a },
138 /**< Size of match value. Do NOT split size and key! */
139 uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
140 /**< Matcher value. This value is used as the mask or a key. */
142 .size = sizeof(matcher_mask.buf),
145 .size = sizeof(matcher_value.buf),
147 struct mlx5dv_flow_matcher_attr dv_attr = {
148 .type = IBV_FLOW_ATTR_NORMAL,
149 .match_mask = (void *)&matcher_mask,
151 void *match_m = matcher_mask.buf;
152 void *match_v = matcher_value.buf;
153 void *headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
154 void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
156 const uint8_t l3_hash =
157 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
158 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
159 const uint8_t l4_hash =
160 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
161 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
162 enum { PRIO, CRITERIA, IP_VER_M, IP_VER_V, IP_PROT_M, IP_PROT_V, L3_BIT,
164 const uint8_t vars[RTE_DIM(priv->steer.rss)][END] = {
165 { 7, 0, 0, 0, 0, 0, 0, 0, 0 },
166 { 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0, 0,
167 MLX5_L3_PROT_TYPE_IPV4, 0, l3_hash },
168 { 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0, 0,
169 MLX5_L3_PROT_TYPE_IPV6, 0, l3_hash },
170 { 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
171 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_UDP,
173 { 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
174 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_TCP,
176 { 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
177 IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_UDP,
179 { 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
180 IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_TCP,
185 for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
186 dv_attr.priority = vars[i][PRIO];
187 dv_attr.match_criteria_enable = vars[i][CRITERIA];
188 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
190 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
192 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
194 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
196 tir_att.rx_hash_field_selector_outer.l3_prot_type =
198 tir_att.rx_hash_field_selector_outer.l4_prot_type =
200 tir_att.rx_hash_field_selector_outer.selected_fields =
202 priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
203 (priv->ctx, &dv_attr, priv->steer.tbl);
204 if (!priv->steer.rss[i].matcher) {
205 DRV_LOG(ERR, "Failed to create matcher %d.", i);
208 priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,
210 if (!priv->steer.rss[i].tir) {
211 DRV_LOG(ERR, "Failed to create TIR %d.", i);
214 priv->steer.rss[i].tir_action =
215 mlx5_glue->dv_create_flow_action_dest_devx_tir
216 (priv->steer.rss[i].tir->obj);
217 if (!priv->steer.rss[i].tir_action) {
218 DRV_LOG(ERR, "Failed to create TIR action %d.", i);
221 actions[0] = priv->steer.rss[i].tir_action;
222 priv->steer.rss[i].flow = mlx5_glue->dv_create_flow
223 (priv->steer.rss[i].matcher,
224 (void *)&matcher_value, 1, actions);
225 if (!priv->steer.rss[i].flow) {
226 DRV_LOG(ERR, "Failed to create flow %d.", i);
232 /* Resources will be freed by the caller. */
237 #endif /* HAVE_MLX5DV_DR */
241 mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
243 int ret = mlx5_vdpa_rqt_prepare(priv);
246 mlx5_vdpa_rss_flows_destroy(priv);
247 if (priv->steer.rqt) {
248 claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
249 priv->steer.rqt = NULL;
251 } else if (ret < 0) {
253 } else if (!priv->steer.rss[0].flow) {
254 ret = mlx5_vdpa_rss_flows_create(priv);
256 DRV_LOG(ERR, "Cannot create RSS flows.");
/**
 * Set up the Rx steering infrastructure of the device.
 *
 * Creates the DR NIC-Rx domain and the root flow table, then populates
 * them via mlx5_vdpa_steer_update(). On any failure everything created so
 * far is released through mlx5_vdpa_steer_unset().
 *
 * @param priv
 *   vDPA device private structure.
 *
 * @return
 *   0 on success, a negative value otherwise (-ENOTSUP when the mlx5 DR
 *   API is not available at build time).
 */
int
mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
{
#ifdef HAVE_MLX5DV_DR
	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
						  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	if (!priv->steer.domain) {
		DRV_LOG(ERR, "Failed to create Rx domain.");
		goto error;
	}
	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
	if (!priv->steer.tbl) {
		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
		goto error;
	}
	if (mlx5_vdpa_steer_update(priv))
		goto error;
	return 0;
error:
	mlx5_vdpa_steer_unset(priv);
	return -1;
#else
	(void)priv;
	return -ENOTSUP;
#endif /* HAVE_MLX5DV_DR */
}