1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2019 Mellanox Technologies, Ltd
4 #include <netinet/in.h>
6 #include <rte_malloc.h>
8 #include <rte_common.h>
10 #include <mlx5_common.h>
12 #include "mlx5_vdpa_utils.h"
13 #include "mlx5_vdpa.h"
/*
 * Release every Rx steering object referenced from priv->steer and NULL
 * each pointer afterwards, so the routine is idempotent and safe to call
 * on a partially constructed state (each destroy is guarded by a NULL
 * check).  Tear-down runs in reverse creation order: per-RSS flow ->
 * TIR action -> TIR -> matcher, then the shared flow table, the DR
 * domain and finally the RQT.
 *
 * NOTE(review): this view of the file is truncated — the return-type
 * line, the loop-variable declaration and several closing braces are
 * not visible here.
 */
16 mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
21 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
22 		if (priv->steer.rss[i].flow) {
			/* claim_zero(): project helper — presumably asserts the
			 * destroy call returned 0; defined outside this file. */
23 			claim_zero(mlx5_glue->dv_destroy_flow
24 				   (priv->steer.rss[i].flow));
25 			priv->steer.rss[i].flow = NULL;
27 		if (priv->steer.rss[i].tir_action) {
28 			claim_zero(mlx5_glue->destroy_flow_action
29 				   (priv->steer.rss[i].tir_action));
30 			priv->steer.rss[i].tir_action = NULL;
32 		if (priv->steer.rss[i].tir) {
33 			claim_zero(mlx5_devx_cmd_destroy
34 				   (priv->steer.rss[i].tir));
35 			priv->steer.rss[i].tir = NULL;
37 		if (priv->steer.rss[i].matcher) {
38 			claim_zero(mlx5_glue->dv_destroy_flow_matcher
39 				   (priv->steer.rss[i].matcher));
40 			priv->steer.rss[i].matcher = NULL;
	/* Shared objects go last, after all per-RSS users are destroyed. */
43 	if (priv->steer.tbl) {
44 		claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
45 		priv->steer.tbl = NULL;
47 	if (priv->steer.domain) {
48 		claim_zero(mlx5_glue->dr_destroy_domain(priv->steer.domain));
49 		priv->steer.domain = NULL;
51 	if (priv->steer.rqt) {
52 		claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
53 		priv->steer.rqt = NULL;
59  * According to the VIRTIO_NET Spec, the virtqueue index identifies its type by:
/*
 * Return whether the given virtqueue index denotes a receive queue.
 * Per the comment above this function: even indexes are RX queues,
 * except that the last vring of the set is the control queue and is
 * therefore excluded.
 *
 * NOTE(review): truncated view — the return statements and braces are
 * not visible here; only the selecting condition is shown.
 */
68 is_virtq_recvq(int virtq_index, int nr_vring)
70 	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
75 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
/*
 * Build (or rebuild) the RQT describing all currently known receive
 * virtqueues and push it to the device: created on first use, modified
 * in place afterwards.  The attribute struct is heap-allocated because
 * rq_list[] is sized at runtime (flexible trailing array — allocation
 * size expression is partly outside this truncated view).
 *
 * NOTE(review): several lines (returns, brace closures, declarations of
 * i/j/ret, the rte_free of attr) are missing from this sampled view.
 */
77 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
79 	struct mlx5_vdpa_virtq *virtq;
	/* Table size: capped both by the driver default (512) and by the
	 * device capability 2^log_max_rqt_size. */
80 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
81 				 1 << priv->log_max_rqt_size);
82 	struct mlx5_devx_rqt_attr *attr = rte_zmalloc(__func__, sizeof(*attr)
89 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
	/* Collect the DevX virtq object IDs of every receive queue. */
93 	SLIST_FOREACH(virtq, &priv->virtq_list, next) {
94 		if (is_virtq_recvq(virtq->index, priv->nr_virtqs) &&
96 			attr->rq_list[i] = virtq->virtq->id;
	/* Fill the table up to rqt_n entries by repeating the collected RQs
	 * (j always trails i, so rq_list[j] is already populated).
	 * NOTE(review): presumably at least one RQ was collected before this
	 * point — entries are zeroed by rte_zmalloc otherwise; confirm in
	 * the full source. */
100 	for (j = 0; i != rqt_n; ++i, ++j)
101 		attr->rq_list[i] = attr->rq_list[j];
102 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
103 	attr->rqt_max_size = rqt_n;
104 	attr->rqt_actual_size = rqt_n;
	/* First call creates the RQT object; later calls only modify it. */
105 	if (!priv->steer.rqt) {
106 		priv->steer.rqt = mlx5_devx_cmd_create_rqt(priv->ctx, attr);
107 		if (!priv->steer.rqt) {
108 			DRV_LOG(ERR, "Failed to create RQT.");
112 		ret = mlx5_devx_cmd_modify_rqt(priv->steer.rqt, attr);
114 			DRV_LOG(ERR, "Failed to modify RQT.");
/*
 * Toggle a virtqueue's enable state.  For receive queues the RQT must
 * be regenerated so traffic is steered only to enabled RX queues; on
 * RQT update failure the enable flag is rolled back to its previous
 * value.
 *
 * NOTE(review): truncated view — early-return on unchanged state and
 * the final return are not visible here.
 */
121 mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
123 	struct mlx5_vdpa_priv *priv = virtq->priv;
126 	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index,
127 		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
	/* Normalize to 0/1 and skip work when the state does not change. */
128 	if (virtq->enable == !!enable)
130 	virtq->enable = !!enable;
	/* RX queue membership changed -> refresh the RQT. */
131 	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
132 		ret = mlx5_vdpa_rqt_prepare(priv);
			/* Roll back the flag so state matches the device. */
134 			virtq->enable = !enable;
/*
 * Create the full RSS steering pipeline: one {matcher, TIR, TIR action,
 * flow} quadruple per entry of priv->steer.rss.  The vars[] table drives
 * a single generic loop — each row supplies the flow priority, match
 * criteria, IP version/protocol match mask+value and the TIR hash
 * selection, covering (in order): non-IP catch-all, IPv4, IPv6, UDPv4,
 * TCPv4, UDPv6 and TCPv6.  Lower PRIO value = higher precedence, so L4
 * flows (5) beat L3 flows (6) which beat the catch-all (7).
 * All TIRs hash over the same RQT (priv->steer.rqt) with a fixed
 * symmetric Toeplitz key.
 *
 * Compiled only with direct-rules support; the #else branch (outside
 * this truncated view) presumably returns an error.
 *
 * NOTE(review): this sampled view is missing many lines (matcher struct
 * declarations' headers, error gotos, loop-variable declarations, the
 * success return and the error-path epilogue besides the comment kept
 * below).  Every claim here is limited to what the visible lines show.
 */
139 static int __rte_unused
140 mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
142 #ifdef HAVE_MLX5DV_DR
143 	struct mlx5_devx_tir_attr tir_att = {
144 		.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT,
145 		.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ,
146 		.transport_domain = priv->td->id,
147 		.indirect_table = priv->steer.rqt->id,
148 		.rx_hash_symmetric = 1,
		/* Fixed Toeplitz key shared by all TIRs (appears to be the
		 * widely used default symmetric-RSS key — confirm against
		 * the project's other RSS users). */
149 		.rx_hash_toeplitz_key = { 0x2c, 0xc6, 0x81, 0xd1,
150 					  0x5b, 0xdb, 0xf4, 0xf7,
151 					  0xfc, 0xa2, 0x83, 0x19,
152 					  0xdb, 0x1a, 0x3e, 0x94,
153 					  0x6b, 0x9e, 0x38, 0xd9,
154 					  0x2c, 0x9c, 0x03, 0xd1,
155 					  0xad, 0x99, 0x44, 0xa7,
156 					  0xd9, 0x56, 0x3d, 0x59,
157 					  0x06, 0x3c, 0x25, 0xf3,
158 					  0xfc, 0x1f, 0xdc, 0x2a },
162 	/**< Size of match value. Do NOT split size and key! */
163 	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
164 	/**< Matcher value. This value is used as the mask or a key. */
166 		.size = sizeof(matcher_mask.buf),
169 		.size = sizeof(matcher_value.buf),
171 	struct mlx5dv_flow_matcher_attr dv_attr = {
172 		.type = IBV_FLOW_ATTR_NORMAL,
173 		.match_mask = (void *)&matcher_mask,
175 	void *match_m = matcher_mask.buf;
176 	void *match_v = matcher_value.buf;
	/* Both mask and value are edited in the outer_headers sub-struct. */
177 	void *headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
178 	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
180 	const uint8_t l3_hash =
181 		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
182 		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
183 	const uint8_t l4_hash =
184 		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
185 		(1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	/* Column indexes into each vars[] row below (enum tail truncated
	 * in this view — presumably ... L4_BIT, HASH, END). */
186 	enum { PRIO, CRITERIA, IP_VER_M, IP_VER_V, IP_PROT_M, IP_PROT_V, L3_BIT,
188 	const uint8_t vars[RTE_DIM(priv->steer.rss)][END] = {
		/* Catch-all: no criteria, no hash. */
189 		{ 7, 0, 0, 0, 0, 0, 0, 0, 0 },
		/* IPv4 / IPv6: match ip_version only, hash on L3 addresses. */
190 		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0, 0,
191 		  MLX5_L3_PROT_TYPE_IPV4, 0, l3_hash },
192 		{ 6, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0, 0,
193 		  MLX5_L3_PROT_TYPE_IPV6, 0, l3_hash },
		/* UDP/TCP over IPv4/IPv6: match version + protocol,
		 * hash on L3 addresses + L4 ports. */
194 		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
195 		  IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_UDP,
197 		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 4, 0xff,
198 		  IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV4, MLX5_L4_PROT_TYPE_TCP,
200 		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
201 		  IPPROTO_UDP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_UDP,
203 		{ 5, 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT, 0xf, 6, 0xff,
204 		  IPPROTO_TCP, MLX5_L3_PROT_TYPE_IPV6, MLX5_L4_PROT_TYPE_TCP,
	/* Generic creation loop: patch matcher + TIR attrs from the current
	 * row, then create matcher -> TIR -> TIR action -> flow. */
209 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
210 		dv_attr.priority = vars[i][PRIO];
211 		dv_attr.match_criteria_enable = vars[i][CRITERIA];
212 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
214 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
216 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
218 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
220 		tir_att.rx_hash_field_selector_outer.l3_prot_type =
222 		tir_att.rx_hash_field_selector_outer.l4_prot_type =
224 		tir_att.rx_hash_field_selector_outer.selected_fields =
226 		priv->steer.rss[i].matcher = mlx5_glue->dv_create_flow_matcher
227 					 (priv->ctx, &dv_attr, priv->steer.tbl);
228 		if (!priv->steer.rss[i].matcher) {
229 			DRV_LOG(ERR, "Failed to create matcher %d.", i);
232 		priv->steer.rss[i].tir = mlx5_devx_cmd_create_tir(priv->ctx,
234 		if (!priv->steer.rss[i].tir) {
235 			DRV_LOG(ERR, "Failed to create TIR %d.", i);
238 		priv->steer.rss[i].tir_action =
239 				mlx5_glue->dv_create_flow_action_dest_devx_tir
240 						  (priv->steer.rss[i].tir->obj);
241 		if (!priv->steer.rss[i].tir_action) {
242 			DRV_LOG(ERR, "Failed to create TIR action %d.", i);
245 		actions[0] = priv->steer.rss[i].tir_action;
246 		priv->steer.rss[i].flow = mlx5_glue->dv_create_flow
247 					  (priv->steer.rss[i].matcher,
248 					   (void *)&matcher_value, 1, actions);
249 		if (!priv->steer.rss[i].flow) {
250 			DRV_LOG(ERR, "Failed to create flow %d.", i);
256 	/* Resources will be freed by the caller. */
261 #endif /* HAVE_MLX5DV_DR */
/*
 * Build the whole Rx steering stack bottom-up:
 *   RQT -> DR NIC-Rx domain -> flow table 0 -> RSS flows.
 * Any failure funnels into a single error path that calls
 * mlx5_vdpa_steer_unset() to release whatever was already created
 * (unset NULL-checks each object, so partial construction is safe).
 *
 * NOTE(review): truncated view — the return-type line, the goto/error
 * labels, the success return and the non-DR #else branch are not
 * visible here.
 */
265 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
267 #ifdef HAVE_MLX5DV_DR
268 	if (mlx5_vdpa_rqt_prepare(priv))
270 	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
271 						MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
272 	if (!priv->steer.domain) {
273 		DRV_LOG(ERR, "Failed to create Rx domain.");
	/* Root (level 0) table inside the Rx domain hosts all RSS flows. */
276 	priv->steer.tbl = mlx5_glue->dr_create_flow_tbl(priv->steer.domain, 0);
277 	if (!priv->steer.tbl) {
278 		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
281 	if (mlx5_vdpa_rss_flows_create(priv))
	/* Error path: tear down everything created so far. */
285 	mlx5_vdpa_steer_unset(priv);
290 #endif /* HAVE_MLX5DV_DR */