1c88f44ac2bd1fa73983d321a80af30eda0b3d99
[dpdk.git] / drivers / common / mlx5 / mlx5_devx_cmds.c
1 // SPDX-License-Identifier: BSD-3-Clause
2 /* Copyright 2018 Mellanox Technologies, Ltd */
3
4 #include <unistd.h>
5
6 #include <rte_errno.h>
7 #include <rte_malloc.h>
8
9 #include "mlx5_prm.h"
10 #include "mlx5_devx_cmds.h"
11 #include "mlx5_common_utils.h"
12
13
14 /**
15  * Perform read access to the registers. Reads data from register
16  * and writes ones to the specified buffer.
17  *
18  * @param[in] ctx
19  *   Context returned from mlx5 open_device() glue function.
20  * @param[in] reg_id
21  *   Register identifier according to the PRM.
22  * @param[in] arg
23  *   Register access auxiliary parameter according to the PRM.
24  * @param[out] data
25  *   Pointer to the buffer to store read data.
26  * @param[in] dw_cnt
27  *   Buffer size in double words.
28  *
29  * @return
30  *   0 on success, a negative value otherwise.
31  */
32 int
33 mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
34                             uint32_t *data, uint32_t dw_cnt)
35 {
36         uint32_t in[MLX5_ST_SZ_DW(access_register_in)]   = {0};
37         uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
38                      MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
39         int status, rc;
40
41         MLX5_ASSERT(data && dw_cnt);
42         MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
43         if (dw_cnt  > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
44                 DRV_LOG(ERR, "Not enough  buffer for register read data");
45                 return -1;
46         }
47         MLX5_SET(access_register_in, in, opcode,
48                  MLX5_CMD_OP_ACCESS_REGISTER_USER);
49         MLX5_SET(access_register_in, in, op_mod,
50                                         MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
51         MLX5_SET(access_register_in, in, register_id, reg_id);
52         MLX5_SET(access_register_in, in, argument, arg);
53         rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
54                                          MLX5_ST_SZ_DW(access_register_out) *
55                                          sizeof(uint32_t) + dw_cnt);
56         if (rc)
57                 goto error;
58         status = MLX5_GET(access_register_out, out, status);
59         if (status) {
60                 int syndrome = MLX5_GET(access_register_out, out, syndrome);
61
62                 DRV_LOG(DEBUG, "Failed to access NIC register 0x%X, "
63                                "status %x, syndrome = %x",
64                                reg_id, status, syndrome);
65                 return -1;
66         }
67         memcpy(data, &out[MLX5_ST_SZ_DW(access_register_out)],
68                dw_cnt * sizeof(uint32_t));
69         return 0;
70 error:
71         rc = (rc > 0) ? -rc : rc;
72         return rc;
73 }
74
75 /**
76  * Allocate flow counters via devx interface.
77  *
78  * @param[in] ctx
79  *   Context returned from mlx5 open_device() glue function.
80  * @param dcs
81  *   Pointer to counters properties structure to be filled by the routine.
82  * @param bulk_n_128
83  *   Bulk counter numbers in 128 counters units.
84  *
85  * @return
86  *   Pointer to counter object on success, a negative value otherwise and
87  *   rte_errno is set.
88  */
89 struct mlx5_devx_obj *
90 mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
91 {
92         struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
93         uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
94         uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
95
96         if (!dcs) {
97                 rte_errno = ENOMEM;
98                 return NULL;
99         }
100         MLX5_SET(alloc_flow_counter_in, in, opcode,
101                  MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
102         MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
103         dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
104                                               sizeof(in), out, sizeof(out));
105         if (!dcs->obj) {
106                 DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
107                 rte_errno = errno;
108                 rte_free(dcs);
109                 return NULL;
110         }
111         dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
112         return dcs;
113 }
114
115 /**
116  * Query flow counters values.
117  *
118  * @param[in] dcs
119  *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.
120  * @param[in] clear
121  *   Whether hardware should clear the counters after the query or not.
122  * @param[in] n_counters
123  *   0 in case of 1 counter to read, otherwise the counter number to read.
124  *  @param pkts
125  *   The number of packets that matched the flow.
126  *  @param bytes
127  *    The number of bytes that matched the flow.
128  *  @param mkey
129  *   The mkey key for batch query.
130  *  @param addr
131  *    The address in the mkey range for batch query.
132  *  @param cmd_comp
133  *   The completion object for asynchronous batch query.
134  *  @param async_id
135  *    The ID to be returned in the asynchronous batch query response.
136  *
137  * @return
138  *   0 on success, a negative value otherwise.
139  */
140 int
141 mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
142                                  int clear, uint32_t n_counters,
143                                  uint64_t *pkts, uint64_t *bytes,
144                                  uint32_t mkey, void *addr,
145                                  void *cmd_comp,
146                                  uint64_t async_id)
147 {
148         int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
149                         MLX5_ST_SZ_BYTES(traffic_counter);
150         uint32_t out[out_len];
151         uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
152         void *stats;
153         int rc;
154
155         MLX5_SET(query_flow_counter_in, in, opcode,
156                  MLX5_CMD_OP_QUERY_FLOW_COUNTER);
157         MLX5_SET(query_flow_counter_in, in, op_mod, 0);
158         MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
159         MLX5_SET(query_flow_counter_in, in, clear, !!clear);
160
161         if (n_counters) {
162                 MLX5_SET(query_flow_counter_in, in, num_of_counters,
163                          n_counters);
164                 MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
165                 MLX5_SET(query_flow_counter_in, in, mkey, mkey);
166                 MLX5_SET64(query_flow_counter_in, in, address,
167                            (uint64_t)(uintptr_t)addr);
168         }
169         if (!cmd_comp)
170                 rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
171                                                out_len);
172         else
173                 rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
174                                                      out_len, async_id,
175                                                      cmd_comp);
176         if (rc) {
177                 DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
178                 rte_errno = rc;
179                 return -rc;
180         }
181         if (!n_counters) {
182                 stats = MLX5_ADDR_OF(query_flow_counter_out,
183                                      out, flow_statistics);
184                 *pkts = MLX5_GET64(traffic_counter, stats, packets);
185                 *bytes = MLX5_GET64(traffic_counter, stats, octets);
186         }
187         return 0;
188 }
189
190 /**
191  * Create a new mkey.
192  *
193  * @param[in] ctx
194  *   Context returned from mlx5 open_device() glue function.
195  * @param[in] attr
196  *   Attributes of the requested mkey.
197  *
198  * @return
199  *   Pointer to Devx mkey on success, a negative value otherwise and rte_errno
200  *   is set.
201  */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	/*
	 * Input command size: fixed CREATE_MKEY header plus, in KLM mode,
	 * the KLM table rounded up to a multiple of 4 entries (hardware
	 * requires the table length in whole octwords).
	 */
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	/* VLA cannot use = {0}; clear the whole command explicitly. */
	memset(in, 0, in_size_dw * 4);
	pgsize = sysconf(_SC_PAGESIZE);
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		/* Indirect mkey: fill the trailing KLM entry table. */
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		/* Zero-pad the table up to the 4-entry alignment. */
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		/* Fixed-buffer-size KLM when an entity size is given. */
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		/*
		 * Direct (MTT) mkey: one 8-byte MTT entry per page of the
		 * region, expressed in 16-byte octwords.
		 */
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	/* Local read/write access; qpn 0xffffff means "any QP". */
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	/* Low byte of the mkey is chosen by software (variant part). */
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	if (attr->relaxed_ordering == 1) {
		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
	}
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		rte_free(mkey);
		return NULL;
	}
	/* Full mkey = firmware index (high 24 bits) | variant byte. */
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
279
280 /**
281  * Get status of devx command response.
282  * Mainly used for asynchronous commands.
283  *
284  * @param[in] out
285  *   The out response buffer.
286  *
287  * @return
288  *   0 on success, non-zero value otherwise.
289  */
290 int
291 mlx5_devx_get_out_command_status(void *out)
292 {
293         int status;
294
295         if (!out)
296                 return -EINVAL;
297         status = MLX5_GET(query_flow_counter_out, out, status);
298         if (status) {
299                 int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
300
301                 DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
302                         syndrome);
303         }
304         return status;
305 }
306
307 /**
308  * Destroy any object allocated by a Devx API.
309  *
310  * @param[in] obj
311  *   Pointer to a general object.
312  *
313  * @return
314  *   0 on success, a negative value otherwise.
315  */
316 int
317 mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
318 {
319         int ret;
320
321         if (!obj)
322                 return 0;
323         ret =  mlx5_glue->devx_obj_destroy(obj->obj);
324         rte_free(obj);
325         return ret;
326 }
327
328 /**
329  * Query NIC vport context.
330  * Fills minimal inline attribute.
331  *
332  * @param[in] ctx
333  *   ibv contexts returned from mlx5dv_open_device.
334  * @param[in] vport
335  *   vport index
336  * @param[out] attr
337  *   Attributes device values.
338  *
339  * @return
340  *   0 on success, a negative value otherwise.
341  */
342 static int
343 mlx5_devx_cmd_query_nic_vport_context(void *ctx,
344                                       unsigned int vport,
345                                       struct mlx5_hca_attr *attr)
346 {
347         uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
348         uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
349         void *vctx;
350         int status, syndrome, rc;
351
352         /* Query NIC vport context to determine inline mode. */
353         MLX5_SET(query_nic_vport_context_in, in, opcode,
354                  MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
355         MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
356         if (vport)
357                 MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
358         rc = mlx5_glue->devx_general_cmd(ctx,
359                                          in, sizeof(in),
360                                          out, sizeof(out));
361         if (rc)
362                 goto error;
363         status = MLX5_GET(query_nic_vport_context_out, out, status);
364         syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
365         if (status) {
366                 DRV_LOG(DEBUG, "Failed to query NIC vport context, "
367                         "status %x, syndrome = %x",
368                         status, syndrome);
369                 return -1;
370         }
371         vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
372                             nic_vport_context);
373         attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
374                                            min_wqe_inline_mode);
375         return 0;
376 error:
377         rc = (rc > 0) ? -rc : rc;
378         return rc;
379 }
380
381 /**
382  * Query NIC vDPA attributes.
383  *
384  * @param[in] ctx
385  *   Context returned from mlx5 open_device() glue function.
386  * @param[out] vdpa_attr
387  *   vDPA Attributes structure to fill.
388  */
389 static void
390 mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
391                                   struct mlx5_hca_vdpa_attr *vdpa_attr)
392 {
393         uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
394         uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
395         void *hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
396         int status, syndrome, rc;
397
398         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
399         MLX5_SET(query_hca_cap_in, in, op_mod,
400                  MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
401                  MLX5_HCA_CAP_OPMOD_GET_CUR);
402         rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
403         status = MLX5_GET(query_hca_cap_out, out, status);
404         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
405         if (rc || status) {
406                 RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities,"
407                         " status %x, syndrome = %x", status, syndrome);
408                 vdpa_attr->valid = 0;
409         } else {
410                 vdpa_attr->valid = 1;
411                 vdpa_attr->desc_tunnel_offload_type =
412                         MLX5_GET(virtio_emulation_cap, hcattr,
413                                  desc_tunnel_offload_type);
414                 vdpa_attr->eth_frame_offload_type =
415                         MLX5_GET(virtio_emulation_cap, hcattr,
416                                  eth_frame_offload_type);
417                 vdpa_attr->virtio_version_1_0 =
418                         MLX5_GET(virtio_emulation_cap, hcattr,
419                                  virtio_version_1_0);
420                 vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
421                                                tso_ipv4);
422                 vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
423                                                tso_ipv6);
424                 vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
425                                               tx_csum);
426                 vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
427                                               rx_csum);
428                 vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
429                                                  event_mode);
430                 vdpa_attr->virtio_queue_type =
431                         MLX5_GET(virtio_emulation_cap, hcattr,
432                                  virtio_queue_type);
433                 vdpa_attr->log_doorbell_stride =
434                         MLX5_GET(virtio_emulation_cap, hcattr,
435                                  log_doorbell_stride);
436                 vdpa_attr->log_doorbell_bar_size =
437                         MLX5_GET(virtio_emulation_cap, hcattr,
438                                  log_doorbell_bar_size);
439                 vdpa_attr->doorbell_bar_offset =
440                         MLX5_GET64(virtio_emulation_cap, hcattr,
441                                    doorbell_bar_offset);
442                 vdpa_attr->max_num_virtio_queues =
443                         MLX5_GET(virtio_emulation_cap, hcattr,
444                                  max_num_virtio_queues);
445                 vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
446                                                  umem_1_buffer_param_a);
447                 vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
448                                                  umem_1_buffer_param_b);
449                 vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
450                                                  umem_2_buffer_param_a);
451                 vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
452                                                  umem_2_buffer_param_b);
453                 vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
454                                                  umem_3_buffer_param_a);
455                 vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
456                                                  umem_3_buffer_param_b);
457         }
458 }
459
460 int
461 mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
462                                   uint32_t ids[], uint32_t num)
463 {
464         uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
465         uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
466         void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
467         void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
468         void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
469         int ret;
470         uint32_t idx = 0;
471         uint32_t i;
472
473         if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
474                 rte_errno = EINVAL;
475                 DRV_LOG(ERR, "Too many sample IDs to be fetched.");
476                 return -rte_errno;
477         }
478         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
479                  MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
480         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
481                  MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
482         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
483         ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
484                                         out, sizeof(out));
485         if (ret) {
486                 rte_errno = ret;
487                 DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
488                         (void *)flex_obj);
489                 return -rte_errno;
490         }
491         for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
492                 void *s_off = (void *)((char *)sample + i *
493                               MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
494                 uint32_t en;
495
496                 en = MLX5_GET(parse_graph_flow_match_sample, s_off,
497                               flow_match_sample_en);
498                 if (!en)
499                         continue;
500                 ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
501                                   flow_match_sample_field_id);
502         }
503         if (num != idx) {
504                 rte_errno = EINVAL;
505                 DRV_LOG(ERR, "Number of sample IDs are not as expected.");
506                 return -rte_errno;
507         }
508         return ret;
509 }
510
511
512 struct mlx5_devx_obj *
513 mlx5_devx_cmd_create_flex_parser(void *ctx,
514                               struct mlx5_devx_graph_node_attr *data)
515 {
516         uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
517         uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
518         void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
519         void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
520         void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
521         void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
522         void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
523         struct mlx5_devx_obj *parse_flex_obj = NULL;
524         uint32_t i;
525
526         parse_flex_obj = rte_calloc(__func__, 1, sizeof(*parse_flex_obj), 0);
527         if (!parse_flex_obj) {
528                 DRV_LOG(ERR, "Failed to allocate flex parser data");
529                 rte_errno = ENOMEM;
530                 rte_free(in);
531                 return NULL;
532         }
533         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
534                  MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
535         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
536                  MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
537         MLX5_SET(parse_graph_flex, flex, header_length_mode,
538                  data->header_length_mode);
539         MLX5_SET(parse_graph_flex, flex, header_length_base_value,
540                  data->header_length_base_value);
541         MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
542                  data->header_length_field_offset);
543         MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
544                  data->header_length_field_shift);
545         MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
546                  data->header_length_field_mask);
547         for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
548                 struct mlx5_devx_match_sample_attr *s = &data->sample[i];
549                 void *s_off = (void *)((char *)sample + i *
550                               MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
551
552                 if (!s->flow_match_sample_en)
553                         continue;
554                 MLX5_SET(parse_graph_flow_match_sample, s_off,
555                          flow_match_sample_en, !!s->flow_match_sample_en);
556                 MLX5_SET(parse_graph_flow_match_sample, s_off,
557                          flow_match_sample_field_offset,
558                          s->flow_match_sample_field_offset);
559                 MLX5_SET(parse_graph_flow_match_sample, s_off,
560                          flow_match_sample_offset_mode,
561                          s->flow_match_sample_offset_mode);
562                 MLX5_SET(parse_graph_flow_match_sample, s_off,
563                          flow_match_sample_field_offset_mask,
564                          s->flow_match_sample_field_offset_mask);
565                 MLX5_SET(parse_graph_flow_match_sample, s_off,
566                          flow_match_sample_field_offset_shift,
567                          s->flow_match_sample_field_offset_shift);
568                 MLX5_SET(parse_graph_flow_match_sample, s_off,
569                          flow_match_sample_field_base_offset,
570                          s->flow_match_sample_field_base_offset);
571                 MLX5_SET(parse_graph_flow_match_sample, s_off,
572                          flow_match_sample_tunnel_mode,
573                          s->flow_match_sample_tunnel_mode);
574         }
575         for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
576                 struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
577                 struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
578                 void *in_off = (void *)((char *)in_arc + i *
579                               MLX5_ST_SZ_BYTES(parse_graph_arc));
580                 void *out_off = (void *)((char *)out_arc + i *
581                               MLX5_ST_SZ_BYTES(parse_graph_arc));
582
583                 if (ia->arc_parse_graph_node != 0) {
584                         MLX5_SET(parse_graph_arc, in_off,
585                                  compare_condition_value,
586                                  ia->compare_condition_value);
587                         MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
588                                  ia->start_inner_tunnel);
589                         MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
590                                  ia->arc_parse_graph_node);
591                         MLX5_SET(parse_graph_arc, in_off,
592                                  parse_graph_node_handle,
593                                  ia->parse_graph_node_handle);
594                 }
595                 if (oa->arc_parse_graph_node != 0) {
596                         MLX5_SET(parse_graph_arc, out_off,
597                                  compare_condition_value,
598                                  oa->compare_condition_value);
599                         MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
600                                  oa->start_inner_tunnel);
601                         MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
602                                  oa->arc_parse_graph_node);
603                         MLX5_SET(parse_graph_arc, out_off,
604                                  parse_graph_node_handle,
605                                  oa->parse_graph_node_handle);
606                 }
607         }
608         parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
609                                                          out, sizeof(out));
610         if (!parse_flex_obj->obj) {
611                 rte_errno = errno;
612                 DRV_LOG(ERR, "Failed to create FLEX PARSE GRAPH object "
613                         "by using DevX.");
614                 rte_free(parse_flex_obj);
615                 return NULL;
616         }
617         parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
618         return parse_flex_obj;
619 }
620
621 /**
622  * Query HCA attributes.
623  * Using those attributes we can check on run time if the device
624  * is having the required capabilities.
625  *
626  * @param[in] ctx
627  *   Context returned from mlx5 open_device() glue function.
628  * @param[out] attr
629  *   Attributes device values.
630  *
631  * @return
632  *   0 on success, a negative value otherwise.
633  */
634 int
635 mlx5_devx_cmd_query_hca_attr(void *ctx,
636                              struct mlx5_hca_attr *attr)
637 {
638         uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
639         uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
640         void *hcattr;
641         int status, syndrome, rc, i;
642
643         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
644         MLX5_SET(query_hca_cap_in, in, op_mod,
645                  MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
646                  MLX5_HCA_CAP_OPMOD_GET_CUR);
647
648         rc = mlx5_glue->devx_general_cmd(ctx,
649                                          in, sizeof(in), out, sizeof(out));
650         if (rc)
651                 goto error;
652         status = MLX5_GET(query_hca_cap_out, out, status);
653         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
654         if (status) {
655                 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
656                         "status %x, syndrome = %x",
657                         status, syndrome);
658                 return -1;
659         }
660         hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
661         attr->flow_counter_bulk_alloc_bitmap =
662                         MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
663         attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
664                                             flow_counters_dump);
665         attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
666                                           log_max_rqt_size);
667         attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
668         attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
669         attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
670                                                 log_max_hairpin_queues);
671         attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
672                                                     log_max_hairpin_wq_data_sz);
673         attr->log_max_hairpin_num_packets = MLX5_GET
674                 (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
675         attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
676         attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
677                         relaxed_ordering_write);
678         attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
679                         relaxed_ordering_read);
680         attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
681                                           eth_net_offloads);
682         attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
683         attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
684                                                flex_parser_protocols);
685         attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
686         attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
687                                          general_obj_types) &
688                               MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
689         attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
690                                                         general_obj_types) &
691                                   MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
692         attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
693                                          general_obj_types) &
694                               MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
695         attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
696                                           wqe_index_ignore_cap);
697         attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
698         attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
699         attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
700                                               log_max_static_sq_wq);
701         attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
702                                       device_frequency_khz);
703         attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
704         attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
705                                                regexp_num_of_engines);
706         if (attr->qos.sup) {
707                 MLX5_SET(query_hca_cap_in, in, op_mod,
708                          MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
709                          MLX5_HCA_CAP_OPMOD_GET_CUR);
710                 rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
711                                                  out, sizeof(out));
712                 if (rc)
713                         goto error;
714                 if (status) {
715                         DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
716                                 " status %x, syndrome = %x",
717                                 status, syndrome);
718                         return -1;
719                 }
720                 hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
721                 attr->qos.srtcm_sup =
722                                 MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
723                 attr->qos.log_max_flow_meter =
724                                 MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
725                 attr->qos.flow_meter_reg_c_ids =
726                                 MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
727                 attr->qos.flow_meter_reg_share =
728                                 MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
729                 attr->qos.packet_pacing =
730                                 MLX5_GET(qos_cap, hcattr, packet_pacing);
731                 attr->qos.wqe_rate_pp =
732                                 MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
733         }
734         if (attr->vdpa.valid)
735                 mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
736         if (!attr->eth_net_offloads)
737                 return 0;
738
739         /* Query HCA offloads for Ethernet protocol. */
740         memset(in, 0, sizeof(in));
741         memset(out, 0, sizeof(out));
742         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
743         MLX5_SET(query_hca_cap_in, in, op_mod,
744                  MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
745                  MLX5_HCA_CAP_OPMOD_GET_CUR);
746
747         rc = mlx5_glue->devx_general_cmd(ctx,
748                                          in, sizeof(in),
749                                          out, sizeof(out));
750         if (rc) {
751                 attr->eth_net_offloads = 0;
752                 goto error;
753         }
754         status = MLX5_GET(query_hca_cap_out, out, status);
755         syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
756         if (status) {
757                 DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
758                         "status %x, syndrome = %x",
759                         status, syndrome);
760                 attr->eth_net_offloads = 0;
761                 return -1;
762         }
763         hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
764         attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
765                                          hcattr, wqe_vlan_insert);
766         attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
767                                  lro_cap);
768         attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
769                                         hcattr, tunnel_lro_gre);
770         attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
771                                           hcattr, tunnel_lro_vxlan);
772         attr->lro_max_msg_sz_mode = MLX5_GET
773                                         (per_protocol_networking_offload_caps,
774                                          hcattr, lro_max_msg_sz_mode);
775         for (i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
776                 attr->lro_timer_supported_periods[i] =
777                         MLX5_GET(per_protocol_networking_offload_caps, hcattr,
778                                  lro_timer_supported_periods[i]);
779         }
780         attr->tunnel_stateless_geneve_rx =
781                             MLX5_GET(per_protocol_networking_offload_caps,
782                                      hcattr, tunnel_stateless_geneve_rx);
783         attr->geneve_max_opt_len =
784                     MLX5_GET(per_protocol_networking_offload_caps,
785                              hcattr, max_geneve_opt_len);
786         attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
787                                          hcattr, wqe_inline_mode);
788         attr->tunnel_stateless_gtp = MLX5_GET
789                                         (per_protocol_networking_offload_caps,
790                                          hcattr, tunnel_stateless_gtp);
791         if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
792                 return 0;
793         if (attr->eth_virt) {
794                 rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
795                 if (rc) {
796                         attr->eth_virt = 0;
797                         goto error;
798                 }
799         }
800         return 0;
801 error:
802         rc = (rc > 0) ? -rc : rc;
803         return rc;
804 }
805
806 /**
807  * Query TIS transport domain from QP verbs object using DevX API.
808  *
809  * @param[in] qp
810  *   Pointer to verbs QP returned by ibv_create_qp .
811  * @param[in] tis_num
812  *   TIS number of TIS to query.
813  * @param[out] tis_td
814  *   Pointer to TIS transport domain variable, to be set by the routine.
815  *
816  * @return
817  *   0 on success, a negative value otherwise.
818  */
819 int
820 mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
821                               uint32_t *tis_td)
822 {
823 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
824         uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
825         uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
826         int rc;
827         void *tis_ctx;
828
829         MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
830         MLX5_SET(query_tis_in, in, tisn, tis_num);
831         rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
832         if (rc) {
833                 DRV_LOG(ERR, "Failed to query QP using DevX");
834                 return -rc;
835         };
836         tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
837         *tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
838         return 0;
839 #else
840         (void)qp;
841         (void)tis_num;
842         (void)tis_td;
843         return -ENOTSUP;
844 #endif
845 }
846
847 /**
848  * Fill WQ data for DevX API command.
849  * Utility function for use when creating DevX objects containing a WQ.
850  *
851  * @param[in] wq_ctx
852  *   Pointer to WQ context to fill with data.
853  * @param [in] wq_attr
854  *   Pointer to WQ attributes structure to fill in WQ context.
855  */
856 static void
857 devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
858 {
859         MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
860         MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
861         MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
862         MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
863         MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
864         MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
865         MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
866         MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
867         MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
868         MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
869         MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
870         MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
871         MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
872         MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
873         MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
874         MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
875         MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
876         MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
877         MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
878                  wq_attr->log_hairpin_num_packets);
879         MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
880         MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
881                  wq_attr->single_wqe_log_num_of_strides);
882         MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
883         MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
884                  wq_attr->single_stride_log_num_of_bytes);
885         MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
886         MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
887         MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
888 }
889
890 /**
891  * Create RQ using DevX API.
892  *
893  * @param[in] ctx
894  *   Context returned from mlx5 open_device() glue function.
895  * @param [in] rq_attr
896  *   Pointer to create RQ attributes structure.
897  * @param [in] socket
898  *   CPU socket ID for allocations.
899  *
900  * @return
901  *   The DevX object created, NULL otherwise and rte_errno is set.
902  */
903 struct mlx5_devx_obj *
904 mlx5_devx_cmd_create_rq(void *ctx,
905                         struct mlx5_devx_create_rq_attr *rq_attr,
906                         int socket)
907 {
908         uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
909         uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
910         void *rq_ctx, *wq_ctx;
911         struct mlx5_devx_wq_attr *wq_attr;
912         struct mlx5_devx_obj *rq = NULL;
913
914         rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
915         if (!rq) {
916                 DRV_LOG(ERR, "Failed to allocate RQ data");
917                 rte_errno = ENOMEM;
918                 return NULL;
919         }
920         MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
921         rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
922         MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
923         MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
924         MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
925         MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
926         MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
927         MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
928         MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
929         MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
930         MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
931         MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
932         MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
933         MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
934         wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
935         wq_attr = &rq_attr->wq_attr;
936         devx_cmd_fill_wq_data(wq_ctx, wq_attr);
937         rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
938                                                   out, sizeof(out));
939         if (!rq->obj) {
940                 DRV_LOG(ERR, "Failed to create RQ using DevX");
941                 rte_errno = errno;
942                 rte_free(rq);
943                 return NULL;
944         }
945         rq->id = MLX5_GET(create_rq_out, out, rqn);
946         return rq;
947 }
948
949 /**
950  * Modify RQ using DevX API.
951  *
952  * @param[in] rq
953  *   Pointer to RQ object structure.
954  * @param [in] rq_attr
955  *   Pointer to modify RQ attributes structure.
956  *
957  * @return
958  *   0 on success, a negative errno value otherwise and rte_errno is set.
959  */
960 int
961 mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
962                         struct mlx5_devx_modify_rq_attr *rq_attr)
963 {
964         uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
965         uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
966         void *rq_ctx, *wq_ctx;
967         int ret;
968
969         MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
970         MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
971         MLX5_SET(modify_rq_in, in, rqn, rq->id);
972         MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
973         rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
974         MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
975         if (rq_attr->modify_bitmask &
976                         MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
977                 MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
978         if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
979                 MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
980         if (rq_attr->modify_bitmask &
981                         MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
982                 MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
983         MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
984         MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
985         if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
986                 wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
987                 MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
988         }
989         ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
990                                          out, sizeof(out));
991         if (ret) {
992                 DRV_LOG(ERR, "Failed to modify RQ using DevX");
993                 rte_errno = errno;
994                 return -errno;
995         }
996         return ret;
997 }
998
999 /**
1000  * Create TIR using DevX API.
1001  *
1002  * @param[in] ctx
1003  *  Context returned from mlx5 open_device() glue function.
1004  * @param [in] tir_attr
1005  *   Pointer to TIR attributes structure.
1006  *
1007  * @return
1008  *   The DevX object created, NULL otherwise and rte_errno is set.
1009  */
1010 struct mlx5_devx_obj *
1011 mlx5_devx_cmd_create_tir(void *ctx,
1012                          struct mlx5_devx_tir_attr *tir_attr)
1013 {
1014         uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
1015         uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
1016         void *tir_ctx, *outer, *inner, *rss_key;
1017         struct mlx5_devx_obj *tir = NULL;
1018
1019         tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
1020         if (!tir) {
1021                 DRV_LOG(ERR, "Failed to allocate TIR data");
1022                 rte_errno = ENOMEM;
1023                 return NULL;
1024         }
1025         MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
1026         tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
1027         MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
1028         MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
1029                  tir_attr->lro_timeout_period_usecs);
1030         MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
1031         MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
1032         MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
1033         MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
1034         MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
1035                  tir_attr->tunneled_offload_en);
1036         MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
1037         MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
1038         MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
1039         MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
1040         rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
1041         memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
1042         outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
1043         MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
1044                  tir_attr->rx_hash_field_selector_outer.l3_prot_type);
1045         MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
1046                  tir_attr->rx_hash_field_selector_outer.l4_prot_type);
1047         MLX5_SET(rx_hash_field_select, outer, selected_fields,
1048                  tir_attr->rx_hash_field_selector_outer.selected_fields);
1049         inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
1050         MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
1051                  tir_attr->rx_hash_field_selector_inner.l3_prot_type);
1052         MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
1053                  tir_attr->rx_hash_field_selector_inner.l4_prot_type);
1054         MLX5_SET(rx_hash_field_select, inner, selected_fields,
1055                  tir_attr->rx_hash_field_selector_inner.selected_fields);
1056         tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1057                                                    out, sizeof(out));
1058         if (!tir->obj) {
1059                 DRV_LOG(ERR, "Failed to create TIR using DevX");
1060                 rte_errno = errno;
1061                 rte_free(tir);
1062                 return NULL;
1063         }
1064         tir->id = MLX5_GET(create_tir_out, out, tirn);
1065         return tir;
1066 }
1067
1068 /**
1069  * Create RQT using DevX API.
1070  *
1071  * @param[in] ctx
1072  *   Context returned from mlx5 open_device() glue function.
1073  * @param [in] rqt_attr
1074  *   Pointer to RQT attributes structure.
1075  *
1076  * @return
1077  *   The DevX object created, NULL otherwise and rte_errno is set.
1078  */
1079 struct mlx5_devx_obj *
1080 mlx5_devx_cmd_create_rqt(void *ctx,
1081                          struct mlx5_devx_rqt_attr *rqt_attr)
1082 {
1083         uint32_t *in = NULL;
1084         uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
1085                          rqt_attr->rqt_actual_size * sizeof(uint32_t);
1086         uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
1087         void *rqt_ctx;
1088         struct mlx5_devx_obj *rqt = NULL;
1089         int i;
1090
1091         in = rte_calloc(__func__, 1, inlen, 0);
1092         if (!in) {
1093                 DRV_LOG(ERR, "Failed to allocate RQT IN data");
1094                 rte_errno = ENOMEM;
1095                 return NULL;
1096         }
1097         rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
1098         if (!rqt) {
1099                 DRV_LOG(ERR, "Failed to allocate RQT data");
1100                 rte_errno = ENOMEM;
1101                 rte_free(in);
1102                 return NULL;
1103         }
1104         MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
1105         rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1106         MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
1107         MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
1108         MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
1109         for (i = 0; i < rqt_attr->rqt_actual_size; i++)
1110                 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
1111         rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
1112         rte_free(in);
1113         if (!rqt->obj) {
1114                 DRV_LOG(ERR, "Failed to create RQT using DevX");
1115                 rte_errno = errno;
1116                 rte_free(rqt);
1117                 return NULL;
1118         }
1119         rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
1120         return rqt;
1121 }
1122
1123 /**
1124  * Modify RQT using DevX API.
1125  *
1126  * @param[in] rqt
1127  *   Pointer to RQT DevX object structure.
1128  * @param [in] rqt_attr
1129  *   Pointer to RQT attributes structure.
1130  *
1131  * @return
1132  *   0 on success, a negative errno value otherwise and rte_errno is set.
1133  */
1134 int
1135 mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
1136                          struct mlx5_devx_rqt_attr *rqt_attr)
1137 {
1138         uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
1139                          rqt_attr->rqt_actual_size * sizeof(uint32_t);
1140         uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
1141         uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
1142         void *rqt_ctx;
1143         int i;
1144         int ret;
1145
1146         if (!in) {
1147                 DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
1148                 rte_errno = ENOMEM;
1149                 return -ENOMEM;
1150         }
1151         MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
1152         MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
1153         MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
1154         rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
1155         MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
1156         MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
1157         MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
1158         for (i = 0; i < rqt_attr->rqt_actual_size; i++)
1159                 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
1160         ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
1161         rte_free(in);
1162         if (ret) {
1163                 DRV_LOG(ERR, "Failed to modify RQT using DevX.");
1164                 rte_errno = errno;
1165                 return -rte_errno;
1166         }
1167         return ret;
1168 }
1169
1170 /**
1171  * Create SQ using DevX API.
1172  *
1173  * @param[in] ctx
1174  *   Context returned from mlx5 open_device() glue function.
1175  * @param [in] sq_attr
1176  *   Pointer to SQ attributes structure.
1177  * @param [in] socket
1178  *   CPU socket ID for allocations.
1179  *
1180  * @return
1181  *   The DevX object created, NULL otherwise and rte_errno is set.
1182  **/
1183 struct mlx5_devx_obj *
1184 mlx5_devx_cmd_create_sq(void *ctx,
1185                         struct mlx5_devx_create_sq_attr *sq_attr)
1186 {
1187         uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
1188         uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
1189         void *sq_ctx;
1190         void *wq_ctx;
1191         struct mlx5_devx_wq_attr *wq_attr;
1192         struct mlx5_devx_obj *sq = NULL;
1193
1194         sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
1195         if (!sq) {
1196                 DRV_LOG(ERR, "Failed to allocate SQ data");
1197                 rte_errno = ENOMEM;
1198                 return NULL;
1199         }
1200         MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
1201         sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
1202         MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
1203         MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
1204         MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
1205         MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
1206         MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
1207                  sq_attr->flush_in_error_en);
1208         MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
1209                  sq_attr->min_wqe_inline_mode);
1210         MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
1211         MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
1212         MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
1213         MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
1214         MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire);
1215         MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq);
1216         MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
1217         MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
1218         MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
1219                  sq_attr->packet_pacing_rate_limit_index);
1220         MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
1221         MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
1222         wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
1223         wq_attr = &sq_attr->wq_attr;
1224         devx_cmd_fill_wq_data(wq_ctx, wq_attr);
1225         sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1226                                              out, sizeof(out));
1227         if (!sq->obj) {
1228                 DRV_LOG(ERR, "Failed to create SQ using DevX");
1229                 rte_errno = errno;
1230                 rte_free(sq);
1231                 return NULL;
1232         }
1233         sq->id = MLX5_GET(create_sq_out, out, sqn);
1234         return sq;
1235 }
1236
1237 /**
1238  * Modify SQ using DevX API.
1239  *
1240  * @param[in] sq
1241  *   Pointer to SQ object structure.
1242  * @param [in] sq_attr
1243  *   Pointer to SQ attributes structure.
1244  *
1245  * @return
1246  *   0 on success, a negative errno value otherwise and rte_errno is set.
1247  */
1248 int
1249 mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
1250                         struct mlx5_devx_modify_sq_attr *sq_attr)
1251 {
1252         uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
1253         uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
1254         void *sq_ctx;
1255         int ret;
1256
1257         MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
1258         MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
1259         MLX5_SET(modify_sq_in, in, sqn, sq->id);
1260         sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1261         MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
1262         MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
1263         MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
1264         ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
1265                                          out, sizeof(out));
1266         if (ret) {
1267                 DRV_LOG(ERR, "Failed to modify SQ using DevX");
1268                 rte_errno = errno;
1269                 return -rte_errno;
1270         }
1271         return ret;
1272 }
1273
1274 /**
1275  * Create TIS using DevX API.
1276  *
1277  * @param[in] ctx
1278  *   Context returned from mlx5 open_device() glue function.
1279  * @param [in] tis_attr
1280  *   Pointer to TIS attributes structure.
1281  *
1282  * @return
1283  *   The DevX object created, NULL otherwise and rte_errno is set.
1284  */
1285 struct mlx5_devx_obj *
1286 mlx5_devx_cmd_create_tis(void *ctx,
1287                          struct mlx5_devx_tis_attr *tis_attr)
1288 {
1289         uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
1290         uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
1291         struct mlx5_devx_obj *tis = NULL;
1292         void *tis_ctx;
1293
1294         tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
1295         if (!tis) {
1296                 DRV_LOG(ERR, "Failed to allocate TIS object");
1297                 rte_errno = ENOMEM;
1298                 return NULL;
1299         }
1300         MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
1301         tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
1302         MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
1303                  tis_attr->strict_lag_tx_port_affinity);
1304         MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
1305                  tis_attr->strict_lag_tx_port_affinity);
1306         MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
1307         MLX5_SET(tisc, tis_ctx, transport_domain,
1308                  tis_attr->transport_domain);
1309         tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1310                                               out, sizeof(out));
1311         if (!tis->obj) {
1312                 DRV_LOG(ERR, "Failed to create TIS using DevX");
1313                 rte_errno = errno;
1314                 rte_free(tis);
1315                 return NULL;
1316         }
1317         tis->id = MLX5_GET(create_tis_out, out, tisn);
1318         return tis;
1319 }
1320
1321 /**
1322  * Create transport domain using DevX API.
1323  *
1324  * @param[in] ctx
1325  *   Context returned from mlx5 open_device() glue function.
1326  * @return
1327  *   The DevX object created, NULL otherwise and rte_errno is set.
1328  */
1329 struct mlx5_devx_obj *
1330 mlx5_devx_cmd_create_td(void *ctx)
1331 {
1332         uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
1333         uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
1334         struct mlx5_devx_obj *td = NULL;
1335
1336         td = rte_calloc(__func__, 1, sizeof(*td), 0);
1337         if (!td) {
1338                 DRV_LOG(ERR, "Failed to allocate TD object");
1339                 rte_errno = ENOMEM;
1340                 return NULL;
1341         }
1342         MLX5_SET(alloc_transport_domain_in, in, opcode,
1343                  MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
1344         td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1345                                              out, sizeof(out));
1346         if (!td->obj) {
1347                 DRV_LOG(ERR, "Failed to create TIS using DevX");
1348                 rte_errno = errno;
1349                 rte_free(td);
1350                 return NULL;
1351         }
1352         td->id = MLX5_GET(alloc_transport_domain_out, out,
1353                            transport_domain);
1354         return td;
1355 }
1356
1357 /**
1358  * Dump all flows to file.
1359  *
1360  * @param[in] fdb_domain
1361  *   FDB domain.
1362  * @param[in] rx_domain
1363  *   RX domain.
1364  * @param[in] tx_domain
1365  *   TX domain.
1366  * @param[out] file
1367  *   Pointer to file stream.
1368  *
1369  * @return
1370  *   0 on success, a nagative value otherwise.
1371  */
1372 int
1373 mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
1374                         void *rx_domain __rte_unused,
1375                         void *tx_domain __rte_unused, FILE *file __rte_unused)
1376 {
1377         int ret = 0;
1378
1379 #ifdef HAVE_MLX5_DR_FLOW_DUMP
1380         if (fdb_domain) {
1381                 ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
1382                 if (ret)
1383                         return ret;
1384         }
1385         MLX5_ASSERT(rx_domain);
1386         ret = mlx5_glue->dr_dump_domain(file, rx_domain);
1387         if (ret)
1388                 return ret;
1389         MLX5_ASSERT(tx_domain);
1390         ret = mlx5_glue->dr_dump_domain(file, tx_domain);
1391 #else
1392         ret = ENOTSUP;
1393 #endif
1394         return -ret;
1395 }
1396
1397 /*
1398  * Create CQ using DevX API.
1399  *
1400  * @param[in] ctx
1401  *   Context returned from mlx5 open_device() glue function.
1402  * @param [in] attr
1403  *   Pointer to CQ attributes structure.
1404  *
1405  * @return
1406  *   The DevX object created, NULL otherwise and rte_errno is set.
1407  */
1408 struct mlx5_devx_obj *
1409 mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
1410 {
1411         uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
1412         uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
1413         struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
1414                                                    0);
1415         void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1416
1417         if (!cq_obj) {
1418                 DRV_LOG(ERR, "Failed to allocate CQ object memory.");
1419                 rte_errno = ENOMEM;
1420                 return NULL;
1421         }
1422         MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
1423         if (attr->db_umem_valid) {
1424                 MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
1425                 MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
1426                 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
1427         } else {
1428                 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
1429         }
1430         MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
1431         MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
1432         MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
1433         MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
1434         MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
1435                  MLX5_ADAPTER_PAGE_SHIFT);
1436         MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
1437         MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
1438         if (attr->q_umem_valid) {
1439                 MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
1440                 MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
1441                 MLX5_SET64(create_cq_in, in, cq_umem_offset,
1442                            attr->q_umem_offset);
1443         }
1444         cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
1445                                                  sizeof(out));
1446         if (!cq_obj->obj) {
1447                 rte_errno = errno;
1448                 DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
1449                 rte_free(cq_obj);
1450                 return NULL;
1451         }
1452         cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
1453         return cq_obj;
1454 }
1455
/**
 * Create VIRTQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Pointer to VIRTQ attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_virtq(void *ctx,
			   struct mlx5_devx_virtq_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
						     sizeof(*virtq_obj), 0);
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);

	if (!virtq_obj) {
		DRV_LOG(ERR, "Failed to allocate virtq data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	/* VIRTQ is created through the general object command interface. */
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	/* Initial ring indices and per-queue offload flags. */
	MLX5_SET16(virtio_net_q, virtq, hw_available_index,
		   attr->hw_available_index);
	MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
	MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
	MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
	MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
	MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
		   attr->virtio_version_1_0);
	/* Completion reporting: event QPN or MSI-X vector per event_mode. */
	MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
	MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
	/* Descriptor, used and available ring addresses. */
	MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
	MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
	MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
	MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
	/* Up to three umems back the queue memory. */
	MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
	MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
	MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
	MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
	MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
	MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
	MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
	MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
	/* Optional virtio queue counters object attachment. */
	MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
	MLX5_SET(virtio_q, virtctx, pd, attr->pd);
	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						    sizeof(out));
	if (!virtq_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
		rte_free(virtq_obj);
		return NULL;
	}
	virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return virtq_obj;
}
1528
/**
 * Modify VIRTQ using DevX API.
 *
 * @param[in] virtq_obj
 *   Pointer to virtq object structure.
 * @param[in] attr
 *   Pointer to modify virtq attributes structure; attr->type selects which
 *   group of fields is modified.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
			   struct mlx5_devx_virtq_attr *attr)
{
	/*
	 * NOTE(review): the create_virtq_in layout is reused for the modify
	 * command - this relies on the modify layout matching it; confirm
	 * against the PRM.
	 */
	uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
	void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
	void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
	int ret;

	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
	/* Tell the device which field group is being modified. */
	MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
	MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
	switch (attr->type) {
	case MLX5_VIRTQ_MODIFY_TYPE_STATE:
		MLX5_SET16(virtio_net_q, virtq, state, attr->state);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
			 attr->dirty_bitmap_mkey);
		MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
			 attr->dirty_bitmap_addr);
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
			 attr->dirty_bitmap_size);
		break;
	case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
		MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
			 attr->dirty_bitmap_dump_enable);
		break;
	default:
		/* Unknown modification type requested by the caller. */
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
1587
1588 /**
1589  * Query VIRTQ using DevX API.
1590  *
1591  * @param[in] virtq_obj
1592  *   Pointer to virtq object structure.
1593  * @param [in/out] attr
1594  *   Pointer to virtq attributes structure.
1595  *
1596  * @return
1597  *   0 on success, a negative errno value otherwise and rte_errno is set.
1598  */
1599 int
1600 mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
1601                            struct mlx5_devx_virtq_attr *attr)
1602 {
1603         uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
1604         uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
1605         void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
1606         void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
1607         int ret;
1608
1609         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1610                  MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1611         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1612                  MLX5_GENERAL_OBJ_TYPE_VIRTQ);
1613         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
1614         ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
1615                                          out, sizeof(out));
1616         if (ret) {
1617                 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
1618                 rte_errno = errno;
1619                 return -errno;
1620         }
1621         attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
1622                                               hw_available_index);
1623         attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
1624         return ret;
1625 }
1626
/**
 * Create QP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_qp(void *ctx,
			struct mlx5_devx_qp_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
	struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
						   0);
	void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);

	if (!qp_obj) {
		DRV_LOG(ERR, "Failed to allocate QP data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	/* Only the RC service type is created here. */
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pd, attr->pd);
	if (attr->uar_index) {
		/* Regular QP: configure UAR, work queues and doorbell. */
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
		/* Page size is expressed relative to the adapter page shift. */
		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
			 MLX5_ADAPTER_PAGE_SHIFT);
		if (attr->sq_size) {
			/* SQ size must be a power of two. */
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
			MLX5_SET(qpc, qpc, log_sq_size,
				 rte_log2_u32(attr->sq_size));
		} else {
			MLX5_SET(qpc, qpc, no_sq, 1);
		}
		if (attr->rq_size) {
			/* RQ size must be a power of two. */
			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->rq_size));
			MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
			MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
				 MLX5_LOG_RQ_STRIDE_SHIFT);
			MLX5_SET(qpc, qpc, log_rq_size,
				 rte_log2_u32(attr->rq_size));
			MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
		} else {
			MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		}
		if (attr->dbr_umem_valid) {
			/* Doorbell record is backed by a registered umem. */
			MLX5_SET(qpc, qpc, dbr_umem_valid,
				 attr->dbr_umem_valid);
			MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
		}
		MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
		MLX5_SET64(create_qp_in, in, wq_umem_offset,
			   attr->wq_umem_offset);
		MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
	} else {
		/* Special QP to be managed by FW - no SQ\RQ\CQ\UAR\DB rec. */
		MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
		MLX5_SET(qpc, qpc, no_sq, 1);
	}
	qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
						 sizeof(out));
	if (!qp_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
		rte_free(qp_obj);
		return NULL;
	}
	qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
	return qp_obj;
}
1706
/**
 * Modify QP using DevX API.
 * Currently supports only force loop-back QP.
 *
 * @param[in] qp
 *   Pointer to QP object structure.
 * @param[in] qp_st_mod_op
 *   The QP state modification operation: MLX5_CMD_OP_RST2INIT_QP,
 *   MLX5_CMD_OP_INIT2RTR_QP or MLX5_CMD_OP_RTR2RTS_QP.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
			      uint32_t remote_qp_id)
{
	/* One buffer sized for the largest of the three transition layouts. */
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
	} in;
	union {
		uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
		uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
		uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
	} out;
	void *qpc;
	int ret;
	unsigned int inlen;
	unsigned int outlen;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	/*
	 * The opcode is set through the rst2init layout for all operations -
	 * relies on the opcode field being at the same offset in all three
	 * command layouts.
	 */
	MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
	switch (qp_st_mod_op) {
	case MLX5_CMD_OP_RST2INIT_QP:
		MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		/* Enable remote read (rre) and remote write (rwe) access. */
		MLX5_SET(qpc, qpc, rre, 1);
		MLX5_SET(qpc, qpc, rwe, 1);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
		inlen = sizeof(in.rst2init);
		outlen = sizeof(out.rst2init);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
		qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
		/* Force loop-back via the fl bit of the primary path. */
		MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
		MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
		MLX5_SET(qpc, qpc, mtu, 1);
		MLX5_SET(qpc, qpc, log_msg_max, 30);
		MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
		MLX5_SET(qpc, qpc, min_rnr_nak, 0);
		inlen = sizeof(in.init2rtr);
		outlen = sizeof(out.init2rtr);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
		MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
		/* Retry/timeout tuning applied when moving to RTS. */
		MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 14);
		MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
		MLX5_SET(qpc, qpc, retry_count, 7);
		MLX5_SET(qpc, qpc, rnr_retry, 7);
		inlen = sizeof(in.rtr2rts);
		outlen = sizeof(out.rtr2rts);
		break;
	default:
		DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
			qp_st_mod_op);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
	if (ret) {
		DRV_LOG(ERR, "Failed to modify QP using DevX.");
		rte_errno = errno;
		return -rte_errno;
	}
	return ret;
}
1790
1791 struct mlx5_devx_obj *
1792 mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
1793 {
1794         uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
1795         uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
1796         struct mlx5_devx_obj *couners_obj = rte_zmalloc(__func__,
1797                                                        sizeof(*couners_obj), 0);
1798         void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
1799
1800         if (!couners_obj) {
1801                 DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
1802                 rte_errno = ENOMEM;
1803                 return NULL;
1804         }
1805         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1806                  MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
1807         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1808                  MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
1809         couners_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
1810                                                       sizeof(out));
1811         if (!couners_obj->obj) {
1812                 rte_errno = errno;
1813                 DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
1814                         " DevX.");
1815                 rte_free(couners_obj);
1816                 return NULL;
1817         }
1818         couners_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1819         return couners_obj;
1820 }
1821
1822 int
1823 mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
1824                                    struct mlx5_devx_virtio_q_couners_attr *attr)
1825 {
1826         uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
1827         uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
1828         void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
1829         void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out,
1830                                                virtio_q_counters);
1831         int ret;
1832
1833         MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1834                  MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1835         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1836                  MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
1837         MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, couners_obj->id);
1838         ret = mlx5_glue->devx_obj_query(couners_obj->obj, in, sizeof(in), out,
1839                                         sizeof(out));
1840         if (ret) {
1841                 DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
1842                 rte_errno = errno;
1843                 return -errno;
1844         }
1845         attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
1846                                          received_desc);
1847         attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
1848                                           completed_desc);
1849         attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
1850                                     error_cqes);
1851         attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
1852                                          bad_desc_errors);
1853         attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
1854                                           exceed_max_chain);
1855         attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
1856                                         invalid_buffer);
1857         return ret;
1858 }